Hacked the scheduler interfaces in Xen.
We now have synchronous pause.
Suspend/death VIRQs have gone away; replace by dom-controller msgs.
Xen no longer knows about PS/2 keyboard/mouse; DOM0 can go straight
at them.
3e5a4e677VBavzM1UZIEcH1B-RlXMA linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h
4060044fVx7-tokvNLKBf_6qBB4lqQ linux-2.4.26-xen-sparse/include/asm-xen/io.h
3e5a4e673p7PEOyHFm3nHkYX6HQYBg linux-2.4.26-xen-sparse/include/asm-xen/irq.h
-3ead095db_LRUXnxaqs0dA1DWhPoQQ linux-2.4.26-xen-sparse/include/asm-xen/keyboard.h
3e5a4e678ddsQOpbSiRdy1GRcDc9WA linux-2.4.26-xen-sparse/include/asm-xen/mmu_context.h
40d06e5b2YWInUX1Xv9amVANwd_2Xg linux-2.4.26-xen-sparse/include/asm-xen/module.h
3f8707e7ZmZ6TxyX0ZUEfvhA2Pb_xQ linux-2.4.26-xen-sparse/include/asm-xen/msr.h
3ddb79bdLX_P6iB7ILiblRLWvebapg xen/common/dom0_ops.c
3e6377e4i0c9GtKN65e99OtRbw3AZw xen/common/dom_mem_ops.c
3ddb79bdYO5D8Av12NHqPeSviav7cg xen/common/domain.c
-3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen/common/event.c
3fba5b96H0khoxNiKbjdi0inpXV-Pw xen/common/event_channel.c
3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c
3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c
40715b2cNVOegtvyft_AHFKJYRprfA xen/drivers/acpi/tables.c
3e4a8cb7alzQCDKS7MlioPoHBKYkdQ xen/drivers/char/Makefile
4049e6bfNSIq7s7OV-Bd69QD0RpR2Q xen/drivers/char/console.c
-3e4a8cb7WmiYdC-ASGiCSG_CL8vsqg xen/drivers/char/keyboard.c
3e4a8cb7nMChlro4wvOBo76n__iCFA xen/drivers/char/serial.c
3ddb79beUWngyIhMHgyPtuTem4o4JA xen/drivers/pci/Makefile
3ddb79beU9td0Mnm0VUMklerBa37qQ xen/drivers/pci/compat.c
3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/include/asm-x86/rwlock.h
3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-x86/smp.h
3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-x86/smpboot.h
-3ddb79c3e9DCEoR-WzNxcOQDzLu7BQ xen/include/asm-x86/softirq.h
3ddb79c3NiyQE2vQnyGiaBnNjBO1rA xen/include/asm-x86/spinlock.h
3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/string.h
3ddb79c3ezddh34MdelJpa5tNR00Dw xen/include/asm-x86/system.h
3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
-3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
4051db79512nOCGweabrFWO2M2h5ng xen/include/hypervisor-ifs/physdev.h
40589968wmhPmV5-ENbBYmMjnedgKw xen/include/hypervisor-ifs/sched_ctl.h
404f3d2eR2Owk-ZcGOx9ULGHg3nrww xen/include/hypervisor-ifs/trace.h
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop | (STOPCODE_shutdown << SCHEDOP_reasonshift))
+ "b" (SCHEDOP_suspend | (STOPCODE_shutdown << SCHEDOP_reasonshift))
: "memory" );
return ret;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop | (STOPCODE_reboot << SCHEDOP_reasonshift))
+ "b" (SCHEDOP_suspend | (STOPCODE_reboot << SCHEDOP_reasonshift))
: "memory" );
return ret;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop | (STOPCODE_suspend << SCHEDOP_reasonshift)),
+ "b" (SCHEDOP_suspend | (STOPCODE_suspend << SCHEDOP_reasonshift)),
"S" (srec) : "memory" );
return ret;
return ret;
}
-static __inline__ long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
- "b" (op), "c" (val) : "memory" );
-
- return ret;
-}
-
static __inline__ int HYPERVISOR_update_va_mapping(
unsigned long page_nr, unsigned long new_val, unsigned long flags)
{
multicall_entry_t multicall_list[8];
int nr_multicall_ents = 0;
-/* used so we treat multiple stop requests as a single one */
-int suspending = 0;
-
/*
* Machine setup..
*/
* Time-to-die callback handling.
*/
-/* Dynamically-mapped IRQ. */
-static int die_irq;
-
-static void die_interrupt(int irq, void *unused, struct pt_regs *regs)
+static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
{
    extern void ctrl_alt_del(void);
+    ctrl_if_send_response(msg);
    ctrl_alt_del();
}
-static int __init setup_die_event(void)
+static int __init setup_shutdown_event(void)
{
-    die_irq = bind_virq_to_irq(VIRQ_DIE);
-    (void)request_irq(die_irq, die_interrupt, 0, "die", NULL);
+    /* Shutdown requests arrive as CMSG_SHUTDOWN; CMSG_SUSPEND already has a
+       receiver (suspend_handler), and only one receiver per class is allowed. */
+    ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
    return 0;
}
-__initcall(setup_die_event);
+__initcall(setup_shutdown_event);
/******************************************************************************
#include <asm/suspend.h>
-static void stop_task(void *unused)
+/* Treat multiple suspend requests as a single one. */
+static int suspending;
+
+static void suspend_task(void *unused)
{
/* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
extern void blkdev_suspend(void);
free_page((unsigned long)suspend_record);
}
-static struct tq_struct stop_tq;
-
-/* Dynamically-mapped IRQ. */
-static int stop_irq;
+static struct tq_struct suspend_tq;
-static void stop_interrupt(int irq, void *unused, struct pt_regs *regs)
+static void suspend_handler(ctrl_msg_t *msg, unsigned long id)
{
- if (!suspending)
+ if ( !suspending )
{
suspending = 1;
- stop_tq.routine = stop_task;
- schedule_task(&stop_tq);
+ suspend_tq.routine = suspend_task;
+ schedule_task(&suspend_tq);
}
else
- printk(KERN_ALERT"Ignore queued stop request\n");
+ {
+ printk(KERN_ALERT"Ignore queued suspend request\n");
+ }
+
+ ctrl_if_send_response(msg);
}
-static int __init setup_stop_event(void)
+static int __init setup_suspend_event(void)
{
- stop_irq = bind_virq_to_irq(VIRQ_STOP);
- (void)request_irq(stop_irq, stop_interrupt, 0, "stop", NULL);
+ ctrl_if_register_receiver(CMSG_SUSPEND, suspend_handler, 0);
return 0;
}
-__initcall(setup_stop_event);
-
+__initcall(setup_suspend_event);
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop | (STOPCODE_shutdown << SCHEDOP_reasonshift))
+ "b" (SCHEDOP_suspend | (STOPCODE_shutdown << SCHEDOP_reasonshift))
: "memory" );
return ret;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop | (STOPCODE_reboot << SCHEDOP_reasonshift))
+ "b" (SCHEDOP_suspend | (STOPCODE_reboot << SCHEDOP_reasonshift))
: "memory" );
return ret;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- "b" (SCHEDOP_stop | (STOPCODE_suspend << SCHEDOP_reasonshift)),
+ "b" (SCHEDOP_suspend | (STOPCODE_suspend << SCHEDOP_reasonshift)),
"S" (srec) : "memory" );
return ret;
return ret;
}
-static inline long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
-{
- int ret;
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
- "b" (op), "c" (val) : "memory" );
-
- return ret;
-}
-
static inline int HYPERVISOR_update_va_mapping(
unsigned long page_nr, pte_t new_val, unsigned long flags)
{
+++ /dev/null
-/* Portions copyright (c) 2003 James Scott, Intel Research Cambridge */
-/*
- * Talks to hypervisor to get PS/2 keyboard and mouse events, and send keyboard
- * and mouse commands
- */
-
-/* Based on:
- * linux/include/asm-i386/keyboard.h
- *
- * Created 3 Nov 1996 by Geert Uytterhoeven
- */
-
-#ifndef _XEN_KEYBOARD_H
-#define _XEN_KEYBOARD_H
-
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/kd.h>
-#include <linux/pm.h>
-#include <asm/io.h>
-
-extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
-extern int pckbd_getkeycode(unsigned int scancode);
-extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
- char raw_mode);
-extern char pckbd_unexpected_up(unsigned char keycode);
-extern void pckbd_leds(unsigned char leds);
-extern void pckbd_init_hw(void);
-extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *);
-
-extern pm_callback pm_kbd_request_override;
-extern unsigned char pckbd_sysrq_xlate[128];
-
-#define kbd_setkeycode pckbd_setkeycode
-#define kbd_getkeycode pckbd_getkeycode
-#define kbd_translate pckbd_translate
-#define kbd_unexpected_up pckbd_unexpected_up
-#define kbd_leds pckbd_leds
-#define kbd_init_hw pckbd_init_hw
-#define kbd_sysrq_xlate pckbd_sysrq_xlate
-
-#define SYSRQ_KEY 0x54
-
-
-/* THIS SECTION TALKS TO XEN TO DO PS2 SUPPORT */
-#include <asm/hypervisor-ifs/kbd.h>
-#include <asm/hypervisor-ifs/hypervisor-if.h>
-
-#define kbd_controller_present xen_kbd_controller_present
-
-static inline int xen_kbd_controller_present ()
-{
- return start_info.flags & SIF_INITDOMAIN;
-}
-
-/* resource allocation */
-#define kbd_request_region() \
- do { } while (0)
-#define kbd_request_irq(handler) \
- do { \
- int irq = bind_virq_to_irq(VIRQ_PS2); \
- request_irq(irq, handler, 0, "ps/2", NULL); \
- } while ( 0 )
-
-/* could implement these with command to xen to filter mouse stuff... */
-#define aux_request_irq(hand, dev_id) 0
-#define aux_free_irq(dev_id) do { } while(0)
-
-/* Some stoneage hardware needs delays after some operations. */
-#define kbd_pause() do { } while(0)
-
-static unsigned char kbd_current_scancode = 0;
-
-static unsigned char kbd_read_input(void)
-{
- return kbd_current_scancode;
-}
-
-static unsigned char kbd_read_status(void)
-{
- long res;
- res = HYPERVISOR_kbd_op(KBD_OP_READ,0);
- if ( res<0 )
- {
- kbd_current_scancode = 0;
- return 0; /* error with our request - wrong domain? */
- }
- kbd_current_scancode = KBD_CODE_SCANCODE(res);
- return KBD_CODE_STATUS(res);
-}
-
-
-#define kbd_write_output(val) HYPERVISOR_kbd_op(KBD_OP_WRITEOUTPUT, val);
-#define kbd_write_command(val) HYPERVISOR_kbd_op(KBD_OP_WRITECOMMAND, val);
-
-
-#endif /* __KERNEL__ */
-#endif /* _XEN_KEYBOARD_H */
ln -sf ../asm-i386/ioctls.h
ln -sf ../asm-i386/ipcbuf.h
ln -sf ../asm-i386/ipc.h
+ln -sf ../asm-i386/keyboard.h
ln -sf ../asm-i386/kmap_types.h
ln -sf ../asm-i386/ldt.h
ln -sf ../asm-i386/linux_logo.h
dom0_op_t op;
op.cmd = DOM0_STOPDOMAIN;
op.u.stopdomain.domain = (domid_t)domid;
- op.u.stopdomain.sync = 0; /* async */
return do_dom0_op(xc_handle, &op);
}
info->has_cpu =
(op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) == DOMSTATE_RUNNING;
info->stopped =
- (op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) == DOMSTATE_STOPPED;
+ (op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK)
+ == DOMSTATE_SUSPENDED;
info->nr_pages = op.u.getdomaininfo.tot_pages;
info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT-10);
PERROR("Could not get info on domain");
goto error_out;
}
- if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_STOPPED) ||
+ if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_PAUSED) ||
(ctxt->pt_base != 0) )
{
ERROR("Domain is already constructed");
PERROR("Could not get info on domain");
goto error_out;
}
- if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_STOPPED) ||
+ if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_PAUSED) ||
(op.u.getdomaininfo.ctxt->pt_base != 0) )
{
ERROR("Domain is already constructed");
int xc_domain_stop_sync( int xc_handle, domid_t domid,
dom0_op_t *op, full_execution_context_t *ctxt)
{
- int i;
-
- printf("Sleep:");
-
- for( i = 0; ; i++ )
- {
-
- op->cmd = DOM0_STOPDOMAIN;
- op->u.stopdomain.domain = (domid_t)domid;
- op->u.stopdomain.sync = 1;
- do_dom0_op(xc_handle, op);
- /* can't trust return code due to sync stop hack :-(( */
-
- op->cmd = DOM0_GETDOMAININFO;
- op->u.getdomaininfo.domain = (domid_t)domid;
- op->u.getdomaininfo.ctxt = ctxt;
- if ( (do_dom0_op(xc_handle, op) < 0) ||
- ((u32)op->u.getdomaininfo.domain != domid) )
- {
- PERROR("Could not get info on domain");
- goto out;
- }
-
- if ( (op->u.getdomaininfo.flags & DOMFLAGS_STATEMASK) ==
- DOMSTATE_STOPPED )
- {
- printf("Domain %u stopped\n",domid);
- return 0;
- }
-
- printf(".");
- }
-
- printf("\n");
-
- out:
- return -1;
+ op->cmd = DOM0_STOPDOMAIN;
+ op->u.stopdomain.domain = (domid_t)domid;
+ do_dom0_op(xc_handle, op);
+ return 0;
}
long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid )
/*
- * Stop codes for SCHEDOP_stop. These are opaque to Xen but interpreted by
+ * Stop codes for SCHEDOP_suspend. These are opaque to Xen but interpreted by
* control software to determine appropriate action.
*/
#define CMSG_BLKIF_FE 2 /* Block-device frontend */
#define CMSG_NETIF_BE 3 /* Network-device backend */
#define CMSG_NETIF_FE 4 /* Network-device frontend */
+#define CMSG_SUSPEND 5 /* Suspend messages */
+#define CMSG_SHUTDOWN 6 /* Shutdown messages */
/******************************************************************************
u32 status; /* 0: NETIF_DRIVER_STATUS_??? */
} PACKED netif_be_driver_status_changed_t; /* 4 bytes */
+
+/******************************************************************************
+ * SUSPEND DEFINITIONS
+ */
+
+/*
+ * Subtypes for suspend messages.
+ */
+/* None. */
+
+
+/******************************************************************************
+ * SHUTDOWN DEFINITIONS
+ */
+
+/*
+ * Subtypes for shutdown messages.
+ */
+#define CMSG_SHUTDOWN_HALT 0 /* Shutdown and halt (don't die). */
+#define CMSG_SHUTDOWN_POWEROFF 1 /* 'Poweroff' => clean death. */
+#define CMSG_SHUTDOWN_REBOOT 2 /* Shutdown and restart. */
+
+
#endif /* __DOMAIN_CONTROLLER_H__ */
int local_port, int remote_port){
// From our prespective rx = producer, tx = consumer.
int err = 0;
- printf("%s> dom=%u %d:%d\n", __FUNCTION__, dom, local_port, remote_port);
+ printf("%s> dom=%u %d:%d\n", __FUNCTION__, (unsigned int)dom,
+ local_port, remote_port);
// Consumer = tx.
//xup->interface->tx_resp_prod = 0;
//xup->interface->tx_req_prod = 0;
xup->tx_resp_prod = xup->interface->tx_resp_prod;
xup->tx_req_cons = xup->interface->tx_resp_prod;
- printf("%s> tx: %p %p : %p %p\n", __FUNCTION__,
- xup->interface->tx_resp_prod,
- xup->tx_resp_prod,
- xup->tx_req_cons,
- xup->interface->tx_req_prod);
+ printf("%s> tx: %u %u : %u %u\n", __FUNCTION__,
+ (unsigned int)xup->interface->tx_resp_prod,
+ (unsigned int)xup->tx_resp_prod,
+ (unsigned int)xup->tx_req_cons,
+ (unsigned int)xup->interface->tx_req_prod);
// Producer = rx.
//xup->interface->rx_req_prod = 0;
//xup->interface->rx_resp_prod = 0;
xup->rx_req_prod = xup->interface->rx_req_prod;
xup->rx_resp_cons = xup->interface->rx_resp_prod;
- printf("%s> rx: %p %p : %p %p\n", __FUNCTION__,
- xup->rx_resp_cons,
- xup->interface->rx_resp_prod,
- xup->interface->rx_req_prod,
- xup->rx_req_prod);
+ printf("%s> rx: %u %u : %u %u\n", __FUNCTION__,
+ (unsigned int)xup->rx_resp_cons,
+ (unsigned int)xup->interface->rx_resp_prod,
+ (unsigned int)xup->interface->rx_req_prod,
+ (unsigned int)xup->rx_req_prod);
xup->remote_dom = dom;
xup->local_port = local_port;
OLDESP = 0x3C
OLDSS = 0x40
-/* Offsets in task_struct */
+/* Offsets in domain structure */
PROCESSOR = 0
-HYP_EVENTS = 2
SHARED_INFO = 4
EVENT_SEL = 8
EVENT_ADDR = 12
/* No special register assumptions */
failsafe_callback:
GET_CURRENT(%ebx)
- movzwl PROCESSOR(%ebx),%eax
+ movl PROCESSOR(%ebx),%eax
shl $4,%eax
lea guest_trap_bounce(%eax),%edx
movl FAILSAFE_ADDR(%ebx),%eax
notl %ecx
cli # tests must not race interrupts
/*test_softirqs:*/
- movzwl PROCESSOR(%ebx),%eax
+ movl PROCESSOR(%ebx),%eax
shl $6,%eax # sizeof(irq_cpustat) == 64
test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
jnz process_softirqs
-/*test_hyp_events:*/
- testw %cx, HYP_EVENTS(%ebx)
- jnz process_hyp_events
/*test_guest_events:*/
movl SHARED_INFO(%ebx),%eax
testb $0xFF,UPCALL_MASK(%eax)
jz restore_all_guest
movb $1,UPCALL_MASK(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
- movzwl PROCESSOR(%ebx),%edx
+ movl PROCESSOR(%ebx),%edx
shl $4,%edx # sizeof(guest_trap_bounce) == 16
lea guest_trap_bounce(%edx),%edx
movl EVENT_ADDR(%ebx),%eax
sti
call SYMBOL_NAME(do_softirq)
jmp test_all_events
-
- ALIGN
-process_hyp_events:
- sti
- call SYMBOL_NAME(do_hyp_events)
- jmp test_all_events
-
+
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {EIP, CS, EFLAGS, [ESP, SS]} */
/* %edx == guest_trap_bounce, %ebx == task_struct */
test $2,%cl
jz 1f /* jump if returning to an existing ring-1 activation */
/* obtain ss/esp from TSS -- no current ring-1 activations */
- movzwl PROCESSOR(%ebx),%eax
+ movl PROCESSOR(%ebx),%eax
/* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
movl %eax, %ecx
shll $7, %ecx
crash_domain_fixup1:
subl $4,%esp
SAVE_ALL
- jmp crash_domain
+ jmp domain_crash
crash_domain_fixup2:
addl $4,%esp
crash_domain_fixup3:
pushl %ss
popl %ds
- jmp crash_domain
+ jmp domain_crash
.previous
ALIGN
process_guest_exception_and_events:
- movzwl PROCESSOR(%ebx),%eax
+ movl PROCESSOR(%ebx),%eax
shl $4,%eax
lea guest_trap_bounce(%eax),%edx
testb $~0,GTB_FLAGS(%edx)
.long SYMBOL_NAME(do_set_fast_trap) /* 15 */
.long SYMBOL_NAME(do_dom_mem_op)
.long SYMBOL_NAME(do_multicall)
- .long SYMBOL_NAME(do_kbd_op)
+ .long SYMBOL_NAME(do_ni_syscall) # do_kbd_op
.long SYMBOL_NAME(do_update_va_mapping)
.long SYMBOL_NAME(do_set_timer_op) /* 20 */
.long SYMBOL_NAME(do_event_channel_op)
{
__asm__("fninit");
if ( cpu_has_xmm ) load_mxcsr(0x1f80);
- set_bit(PF_DONEFPUINIT, ¤t->flags);
+ set_bit(DF_DONEFPUINIT, ¤t->flags);
}
-static inline void __save_init_fpu( struct task_struct *tsk )
+static inline void __save_init_fpu( struct domain *tsk )
{
if ( cpu_has_fxsr ) {
asm volatile( "fxsave %0 ; fnclex"
asm volatile( "fnsave %0 ; fwait"
: "=m" (tsk->thread.i387) );
}
- clear_bit(PF_USEDFPU, &tsk->flags);
+ clear_bit(DF_USEDFPU, &tsk->flags);
}
-void save_init_fpu( struct task_struct *tsk )
+void save_init_fpu( struct domain *tsk )
{
/*
* The guest OS may have set the 'virtual STTS' flag.
* This causes us to set the real flag, so we'll need
* to temporarily clear it while saving f-p state.
*/
- if ( test_bit(PF_GUEST_STTS, &tsk->flags) ) clts();
+ if ( test_bit(DF_GUEST_STTS, &tsk->flags) ) clts();
__save_init_fpu(tsk);
stts();
}
-void restore_fpu( struct task_struct *tsk )
+void restore_fpu( struct domain *tsk )
{
if ( cpu_has_fxsr ) {
asm volatile( "fxrstor %0"
#include <xen/sched.h>
#include <asm/desc.h>
-struct task_struct idle0_task = IDLE0_TASK(idle0_task);
+struct domain idle0_task = IDLE0_TASK(idle0_task);
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
u8 nr_guests;
u8 in_flight;
u8 shareable;
- struct task_struct *guest[IRQ_MAX_GUESTS];
+ struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
static void __do_IRQ_guest(int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- struct task_struct *p;
+ struct domain *p;
int i;
for ( i = 0; i < action->nr_guests; i++ )
}
}
-int pirq_guest_unmask(struct task_struct *p)
+int pirq_guest_unmask(struct domain *p)
{
irq_desc_t *desc;
int i, j, pirq;
return 0;
}
-int pirq_guest_bind(struct task_struct *p, int irq, int will_share)
+int pirq_guest_bind(struct domain *p, int irq, int will_share)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
return rc;
}
-int pirq_guest_unbind(struct task_struct *p, int irq)
+int pirq_guest_unbind(struct domain *p, int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
}
-long set_gdt(struct task_struct *p,
+long set_gdt(struct domain *p,
unsigned long *frames,
unsigned int entries)
{
else if (strcmp(ptr, "fThreadInfo") == 0)
{
#ifdef PDB_PAST
- struct task_struct *p;
+ struct domain *p;
u_long flags;
#endif /* PDB_PAST */
#ifdef PDB_PAST
int thread = 0;
char message[16];
- struct task_struct *p;
+ struct domain *p;
p = find_domain_by_id(pdb_ctx[pdb_level].info);
strncpy (message, p->name, 16);
- put_task_struct(p);
+ put_domain(p);
ptr += 16;
if (hexToInt (&ptr, &thread))
{
if (pdb_ctx.domain == -1) /* pdb context: xen */
{
- struct task_struct *p;
+ struct domain *p;
p = &idle0_task;
if (p->mm.shadow_mode)
}
else if (pdb_ctx.process == -1) /* pdb context: guest os */
{
- struct task_struct *p;
+ struct domain *p;
if (pdb_ctx.domain == -2)
{
pdb_ctx.ptbr = pagetable_val(p->mm.shadow_table);
else
pdb_ctx.ptbr = pagetable_val(p->mm.pagetable);
- put_task_struct(p);
+ put_domain(p);
}
else /* pdb context: process */
{
- struct task_struct *p;
+ struct domain *p;
unsigned long domain_ptbr;
p = find_domain_by_id(pdb_ctx.domain);
domain_ptbr = pagetable_val(p->mm.shadow_table);
else
domain_ptbr = pagetable_val(p->mm.pagetable);
- put_task_struct(p);
+ put_domain(p);
pdb_ctx.ptbr = domain_ptbr;
/*pdb_ctx.ptbr=pdb_linux_pid_ptbr(domain_ptbr, pdb_ctx.process);*/
{
case PDB_LVL_XEN:
{
- struct task_struct *p;
+ struct domain *p;
id -= PDB_ID_OFFSET;
if ( (p = find_domain_by_id(id)) == NULL)
strcpy (pdb_out_buffer, "E00");
else
strcpy (pdb_out_buffer, "OK");
- put_task_struct(p);
+ put_domain(p);
pdb_level = PDB_LVL_GUESTOS;
pdb_ctx[pdb_level].ctrl = id;
}
else
{
- struct task_struct *p = find_domain_by_id(0);
+ struct domain *p = find_domain_by_id(0);
printk ("pdb error: cr3: 0x%lx dom0cr3: 0x%lx\n", cr3,
p->mm.shadow_mode ? pagetable_val(p->mm.shadow_table)
: pagetable_val(p->mm.pagetable));
- put_task_struct(p);
+ put_domain(p);
printk ("pdb error: L2:0x%p (0x%lx)\n",
l2_table, l2_pgentry_val(*l2_table));
}
*/
static void default_idle(void)
{
- if (!hlt_counter) {
+ if ( hlt_counter == 0 )
+ {
__cli();
- if (!current->hyp_events && !softirq_pending(smp_processor_id()))
+ if ( !softirq_pending(smp_processor_id()) )
safe_halt();
else
__sti();
for ( ; ; )
{
irq_stat[cpu].idle_timestamp = jiffies;
- while (!current->hyp_events && !softirq_pending(cpu))
+ while ( !softirq_pending(cpu) )
default_idle();
- do_hyp_events();
do_softirq();
}
}
{
/* Just some sanity to ensure that the scheduler is set up okay. */
ASSERT(current->domain == IDLE_DOMAIN_ID);
- (void)wake_up(current);
+ domain_controller_unpause(current);
__enter_scheduler();
/*
machine_restart(0);
}
-void new_thread(struct task_struct *p,
+void new_thread(struct domain *p,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info)
:"r" (thread->debugreg[register]))
-void switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+void switch_to(struct domain *prev_p, struct domain *next_p)
{
struct thread_struct *next = &next_p->thread;
struct tss_struct *tss = init_tss + smp_processor_id();
unsigned long wait_init_idle;
-struct task_struct *idle_task[NR_CPUS] = { &idle0_task };
+struct domain *idle_task[NR_CPUS] = { &idle0_task };
#ifdef CONFIG_ACPI_INTERPRETER
int acpi_disabled = 0;
extern void trap_init(void);
extern void time_init(void);
extern void ac_timer_init(void);
- extern void initialize_keytable();
- extern void initialize_keyboard(void);
+ extern void initialize_keytable();
extern int opt_nosmp, opt_watchdog, opt_noacpi, opt_ignorebiostables;
extern int do_timer_lists_from_pit;
unsigned long low_mem_size;
memguard_guard_range(cpu0_stack, PAGE_SIZE);
#endif
- open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
- (void *)new_tlbflush_clock_period,
- NULL);
+ open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
if ( opt_watchdog )
nmi_watchdog = NMI_LOCAL_APIC;
init_IRQ(); /* installs simple interrupt wrappers. Starts HZ clock. */
trap_init();
time_init(); /* installs software handler for HZ clock. */
- softirq_init();
init_apic_mappings(); /* make APICs addressable in our pagetables. */
#ifndef CONFIG_SMP
__sti();
- initialize_keytable(); /* call back handling for key codes */
+ initialize_keytable(); /* call back handling for key codes */
serial_init_stage2();
- initialize_keyboard(); /* setup keyboard (also for debugging) */
#ifdef XEN_DEBUGGER
initialize_pdb(); /* pervasive debugger */
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
*/
{
- struct task_struct *idle;
+ struct domain *idle;
unsigned long boot_error = 0;
int timeout, cpu;
unsigned long start_eip, stack;
if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
panic("failed 'createdomain' for CPU %d", cpu);
- set_bit(PF_IDLETASK, &idle->flags);
+ set_bit(DF_IDLETASK, &idle->flags);
idle->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
struct pt_regs *regs,
long error_code, int use_error_code)
{
- struct task_struct *p = current;
+ struct domain *p = current;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
unsigned long fixup;
asmlinkage void do_int3(struct pt_regs *regs, long error_code)
{
- struct task_struct *p = current;
+ struct domain *p = current;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
unsigned long off, addr, fixup;
- struct task_struct *p = current;
+ struct domain *p = current;
extern int map_ldt_shadow_page(unsigned int);
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
{
- struct task_struct *p = current;
+ struct domain *p = current;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
unsigned long fixup;
/* Prevent recursion. */
clts();
- if ( !test_bit(PF_USEDFPU, ¤t->flags) )
+ if ( !test_bit(DF_USEDFPU, ¤t->flags) )
{
- if ( test_bit(PF_DONEFPUINIT, ¤t->flags) )
+ if ( test_bit(DF_DONEFPUINIT, ¤t->flags) )
restore_fpu(current);
else
init_fpu();
- set_bit(PF_USEDFPU, ¤t->flags); /* so we fnsave on switch_to() */
+ set_bit(DF_USEDFPU, ¤t->flags); /* so we fnsave on switch_to() */
}
- if ( test_and_clear_bit(PF_GUEST_STTS, ¤t->flags) )
+ if ( test_and_clear_bit(DF_GUEST_STTS, ¤t->flags) )
{
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
gtb->flags = GTBF_TRAP_NOCODE;
asmlinkage void do_pdb_debug(struct pt_regs *regs, long error_code)
{
unsigned int condition;
- struct task_struct *tsk = current;
+ struct domain *tsk = current;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
asmlinkage void do_debug(struct pt_regs *regs, long error_code)
{
unsigned int condition;
- struct task_struct *tsk = current;
+ struct domain *tsk = current;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
#ifdef XEN_DEBUGGER
unsigned long failsafe_selector,
unsigned long failsafe_address)
{
- struct task_struct *p = current;
+ struct domain *p = current;
if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
return -EPERM;
}
-long set_fast_trap(struct task_struct *p, int idx)
+long set_fast_trap(struct domain *p, int idx)
{
trap_info_t *ti;
long do_fpu_taskswitch(void)
{
- set_bit(PF_GUEST_STTS, ¤t->flags);
+ set_bit(DF_GUEST_STTS, ¤t->flags);
stts();
return 0;
}
-long set_debugreg(struct task_struct *p, int reg, unsigned long value)
+long set_debugreg(struct domain *p, int reg, unsigned long value)
{
int i;
}
-static void ac_timer_softirq_action(struct softirq_action *a)
+static void ac_timer_softirq_action(void)
{
int cpu = smp_processor_id();
struct ac_timer *t, **heap;
printk ("ACT: Initialising Accurate timers\n");
- open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action, NULL);
+ open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action);
for ( i = 0; i < smp_num_cpus; i++ )
{
{
case 'c' :
{
- struct task_struct * p = find_domain_by_id(op->u.debug.domain);
+ struct domain *p = find_domain_by_id(op->u.debug.domain);
if ( p != NULL )
{
- if ( test_bit(PF_CONSTRUCTED, &p->flags) )
- {
- wake_up(p);
- reschedule(p);
- }
- put_task_struct(p);
+ domain_controller_unpause(p);
+ put_domain(p);
}
else
{
int loop;
u_char x;
unsigned long cr3;
- struct task_struct *p;
+ struct domain *p;
p = find_domain_by_id(op->u.debug.domain);
if (p->mm.shadow_mode)
printk (" %02x", x);
}
printk ("\n");
- put_task_struct(p);
+ put_domain(p);
break;
}
case 's' :
{
- struct task_struct * p = find_domain_by_id(op->u.debug.domain);
+ struct domain * p = find_domain_by_id(op->u.debug.domain);
if (p != NULL)
{
- if (p->state != TASK_STOPPED)
- {
- send_guest_virq(p, VIRQ_STOP);
- }
- put_task_struct(p);
+ domain_controller_pause(p);
+ put_domain(p);
}
else
{
#define TRC_DOM0OP_ENTER_BASE 0x00020000
#define TRC_DOM0OP_LEAVE_BASE 0x00030000
-extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
+extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
static int msr_cpu_mask;
static unsigned long msr_addr;
case DOM0_BUILDDOMAIN:
{
- struct task_struct * p = find_domain_by_id(op->u.builddomain.domain);
+ struct domain * p = find_domain_by_id(op->u.builddomain.domain);
ret = -EINVAL;
if ( p != NULL )
{
ret = final_setup_guestos(p, &op->u.builddomain);
- put_task_struct(p);
+ put_domain(p);
}
}
break;
case DOM0_STARTDOMAIN:
{
- struct task_struct * p = find_domain_by_id(op->u.startdomain.domain);
- ret = -EINVAL;
- if ( p != NULL )
+ struct domain *d = find_domain_by_id(op->u.startdomain.domain);
+ ret = -ESRCH;
+ if ( d != NULL )
{
- if ( test_bit(PF_CONSTRUCTED, &p->flags) )
+ ret = -EINVAL;
+ if ( test_bit(DF_CONSTRUCTED, &d->flags) )
{
- wake_up(p);
- reschedule(p);
+ domain_controller_unpause(d);
ret = 0;
}
- put_task_struct(p);
+ put_domain(d);
}
}
break;
case DOM0_STOPDOMAIN:
{
- ret = stop_other_domain(op->u.stopdomain.domain);
-
- /*
- * This is grim, but helps for live migrate. It's also unsafe
- * in the strict sense as we're not explicitly setting a
- * timeout, but dom0 is bound to have other timers going off to
- * wake us back up.
- * We go to sleep so that the other domain can stop quicker, hence
- * we have less total down time in a migrate.
- */
- if( ret == 0 && op->u.stopdomain.sync == 1 )
+ struct domain *d = find_domain_by_id(op->u.stopdomain.domain);
+ ret = -ESRCH;
+ if ( d != NULL )
{
- extern long do_block( void );
- do_block(); /* Yuk... */
+ domain_controller_pause(d);
+ put_domain(d);
+ ret = 0;
}
}
break;
case DOM0_CREATEDOMAIN:
{
- struct task_struct *p;
+ struct domain *p;
static domid_t domnr = 0;
static spinlock_t domnr_lock = SPIN_LOCK_UNLOCKED;
unsigned int pro;
if ( (p = find_domain_by_id(dom)) == NULL )
break;
- put_task_struct(p);
+ put_domain(p);
}
if (op->u.createdomain.cpu == -1 )
ret = alloc_new_dom_mem(p, op->u.createdomain.memory_kb);
if ( ret != 0 )
{
- __kill_domain(p);
+ domain_kill(p);
break;
}
case DOM0_DESTROYDOMAIN:
{
- domid_t dom = op->u.destroydomain.domain;
- int force = op->u.destroydomain.force;
- ret = kill_other_domain(dom, force);
+ struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
+ ret = -ESRCH;
+ if ( d != NULL )
+ {
+ ret = -EINVAL;
+ if ( d != current )
+ {
+ domain_kill(d);
+ put_domain(d);
+ ret = 0;
+ }
+ }
}
break;
ret = -EINVAL;
else
{
- struct task_struct * p = find_domain_by_id(dom);
+ struct domain * p = find_domain_by_id(dom);
int cpu = op->u.pincpudomain.cpu;
- int we_paused = 0;
ret = -ESRCH;
if ( cpu == -1 )
{
p->cpupinned = 0;
- ret = 0;
}
else
{
- /* Pause domain if necessary. */
- if( !(p->state & TASK_STOPPED) &&
- !(p->state & TASK_PAUSED) )
- {
- sched_pause_sync(p);
- we_paused = 1;
- }
-
- /* We need a task structure lock here!!!
- FIX ME!! */
+ domain_pause(p);
cpu = cpu % smp_num_cpus;
p->processor = cpu;
- p->cpupinned = 1;
-
- if ( we_paused )
- wake_up(p);
-
- ret = 0;
+ p->cpupinned = 1;
+ domain_unpause(p);
}
- put_task_struct(p);
+ put_domain(p);
+ ret = 0;
}
}
}
case DOM0_GETMEMLIST:
{
int i;
- struct task_struct *p = find_domain_by_id(op->u.getmemlist.domain);
+ struct domain *p = find_domain_by_id(op->u.getmemlist.domain);
unsigned long max_pfns = op->u.getmemlist.max_pfns;
unsigned long pfn;
unsigned long *buffer = op->u.getmemlist.buffer;
op->u.getmemlist.num_pfns = i;
copy_to_user(u_dom0_op, op, sizeof(*op));
- put_task_struct(p);
+ put_domain(p);
}
}
break;
case DOM0_GETDOMAININFO:
{
full_execution_context_t *c;
- struct task_struct *p;
+ struct domain *p;
unsigned long flags;
- int i;
+ int i, dump_state = 0;
read_lock_irqsave(&tasklist_lock, flags);
break;
}
- if ( (p == NULL) || (p->state == TASK_DYING) )
+ if ( p == NULL )
{
ret = -ESRCH;
goto gdi_out;
}
else
{
- op->u.getdomaininfo.domain = p->domain;
+ op->u.getdomaininfo.domain = p->domain;
strcpy(op->u.getdomaininfo.name, p->name);
- if ( p->state == TASK_RUNNING )
- op->u.getdomaininfo.flags =
- p->has_cpu ? DOMSTATE_RUNNING : DOMSTATE_RUNNABLE;
- else if ( (p->state == TASK_INTERRUPTIBLE) ||
- (p->state == TASK_UNINTERRUPTIBLE) )
- op->u.getdomaininfo.flags = DOMSTATE_BLOCKED;
- else if ( p->state == TASK_PAUSED )
- op->u.getdomaininfo.flags = DOMSTATE_PAUSED;
- else if ( p->state == TASK_CRASHED )
+ /* These are kind of in order of 'importance'. */
+ if ( test_bit(DF_CRASHED, &p->flags) )
op->u.getdomaininfo.flags = DOMSTATE_CRASHED;
+ else if ( test_bit(DF_SUSPENDED, &p->flags) )
+ op->u.getdomaininfo.flags = DOMSTATE_SUSPENDED;
+ else if ( test_bit(DF_CONTROLPAUSE, &p->flags) )
+ op->u.getdomaininfo.flags = DOMSTATE_PAUSED;
+ else if ( test_bit(DF_BLOCKED, &p->flags) )
+ op->u.getdomaininfo.flags = DOMSTATE_BLOCKED;
else
- op->u.getdomaininfo.flags = DOMSTATE_STOPPED;
+ {
+ op->u.getdomaininfo.flags =
+ p->has_cpu ? DOMSTATE_RUNNING : DOMSTATE_RUNNABLE;
+ dump_state = 1;
+ }
+
op->u.getdomaininfo.flags |= p->processor << DOMFLAGS_CPUSHIFT;
op->u.getdomaininfo.flags |= p->stop_code << DOMFLAGS_GUESTSHIFT;
- op->u.getdomaininfo.hyp_events = p->hyp_events;
op->u.getdomaininfo.tot_pages = p->tot_pages;
op->u.getdomaininfo.max_pages = p->max_pages;
op->u.getdomaininfo.cpu_time = p->cpu_time;
op->u.getdomaininfo.shared_info_frame =
__pa(p->shared_info) >> PAGE_SHIFT;
- if ( (p->state == TASK_STOPPED) &&
- (op->u.getdomaininfo.ctxt != NULL) )
+ if ( dump_state && (op->u.getdomaininfo.ctxt != NULL) )
{
if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
{
memcpy(&c->cpu_ctxt,
&p->shared_info->execution_context,
sizeof(p->shared_info->execution_context));
- if ( test_bit(PF_DONEFPUINIT, &p->flags) )
+ if ( test_bit(DF_DONEFPUINIT, &p->flags) )
c->flags |= ECF_I387_VALID;
memcpy(&c->fpu_ctxt,
&p->thread.i387,
struct pfn_info *page;
unsigned long pfn = op->u.getpageframeinfo.pfn;
domid_t dom = op->u.getpageframeinfo.domain;
- struct task_struct *p;
+ struct domain *p;
ret = -EINVAL;
put_page(page);
}
- put_task_struct(p);
+ put_domain(p);
copy_to_user(u_dom0_op, op, sizeof(*op));
}
case DOM0_SHADOW_CONTROL:
{
- struct task_struct *p;
+ struct domain *p;
ret = -ESRCH;
p = find_domain_by_id( op->u.shadow_control.domain );
if ( p )
{
ret = shadow_mode_control(p, &op->u.shadow_control );
- put_task_struct(p);
+ put_domain(p);
copy_to_user(u_dom0_op, op, sizeof(*op));
}
}
case DOM0_SETDOMAINNAME:
{
- struct task_struct *p;
+ struct domain *p;
p = find_domain_by_id( op->u.setdomainname.domain );
if ( p )
{
strncpy(p->name, op->u.setdomainname.name, MAX_DOMAIN_NAME);
- put_task_struct(p);
+ put_domain(p);
}
else
ret = -ESRCH;
case DOM0_SETDOMAININITIALMEM:
{
- struct task_struct *p;
+ struct domain *p;
ret = -ESRCH;
p = find_domain_by_id( op->u.setdomaininitialmem.domain );
if ( p )
{
/* should only be used *before* domain is built. */
- if ( ! test_bit(PF_CONSTRUCTED, &p->flags) )
+ if ( ! test_bit(DF_CONSTRUCTED, &p->flags) )
ret = alloc_new_dom_mem(
p, op->u.setdomaininitialmem.initial_memkb );
else
ret = -EINVAL;
- put_task_struct(p);
+ put_domain(p);
}
}
break;
case DOM0_SETDOMAINMAXMEM:
{
- struct task_struct *p;
+ struct domain *p;
p = find_domain_by_id( op->u.setdomainmaxmem.domain );
if ( p )
{
p->max_pages =
(op->u.setdomainmaxmem.max_memkb+PAGE_SIZE-1)>> PAGE_SHIFT;
- put_task_struct(p);
+ put_domain(p);
}
else
ret = -ESRCH;
int num = op->u.getpageframeinfo2.num;
domid_t dom = op->u.getpageframeinfo2.domain;
unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
- struct task_struct *p;
+ struct domain *p;
unsigned long l_arr[GPF2_BATCH];
ret = -ESRCH;
n+=j;
}
- put_task_struct(p);
+ put_domain(p);
}
break;
#include <xen/event.h>
#include <asm/domain_page.h>
-static long alloc_dom_mem(struct task_struct *p,
+static long alloc_dom_mem(struct domain *p,
unsigned long *pages,
unsigned long nr_pages)
{
return i;
}
-static long free_dom_mem(struct task_struct *p,
+static long free_dom_mem(struct domain *p,
unsigned long *pages,
unsigned long nr_pages)
{
/* Both these structures are protected by the tasklist_lock. */
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
-struct task_struct *task_hash[TASK_HASH_SIZE];
-struct task_struct *task_list;
+struct domain *task_hash[TASK_HASH_SIZE];
+struct domain *task_list;
-struct task_struct *do_createdomain(domid_t dom_id, unsigned int cpu)
+struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
char buf[100];
- struct task_struct *p, **pp;
+ struct domain *p, **pp;
unsigned long flags;
- if ( (p = alloc_task_struct()) == NULL )
+ if ( (p = alloc_domain_struct()) == NULL )
return NULL;
atomic_set(&p->refcnt, 1);
+ atomic_set(&p->pausecnt, 0);
spin_lock_init(&p->mm.shadow_lock);
{
if ( init_event_channels(p) != 0 )
{
- free_task_struct(p);
+ free_domain_struct(p);
return NULL;
}
spin_lock_init(&p->pcidev_lock);
INIT_LIST_HEAD(&p->pcidev_list);
+ sched_add_domain(p);
+
write_lock_irqsave(&tasklist_lock, flags);
pp = &task_list; /* NB. task_list is maintained in order of dom_id. */
for ( pp = &task_list; *pp != NULL; pp = &(*pp)->next_list )
else
{
sprintf(p->name, "Idle-%d", cpu);
+ sched_add_domain(p);
}
- sched_add_domain(p);
return p;
}
-struct task_struct *find_domain_by_id(domid_t dom)
+struct domain *find_domain_by_id(domid_t dom)
{
- struct task_struct *p;
+ struct domain *p;
unsigned long flags;
read_lock_irqsave(&tasklist_lock, flags);
{
if ( p->domain == dom )
{
- get_task_struct(p);
+ if ( unlikely(!get_domain(p)) )
+ p = NULL;
break;
}
p = p->next_hash;
/* return the most recent domain created */
-struct task_struct *find_last_domain(void)
+struct domain *find_last_domain(void)
{
- struct task_struct *p, *plast;
+ struct domain *p, *plast;
unsigned long flags;
read_lock_irqsave(&tasklist_lock, flags);
plast = p;
p = p->next_list;
}
- get_task_struct(plast);
+ if ( !get_domain(plast) )
+ plast = NULL;
read_unlock_irqrestore(&tasklist_lock, flags);
return plast;
}
-void __kill_domain(struct task_struct *p)
+void domain_kill(struct domain *d)
{
- struct task_struct **pp;
- unsigned long flags;
-
- if ( p->domain == 0 )
+ domain_pause(d);
+ if ( !test_and_set_bit(DF_DYING, &d->flags) )
{
- extern void machine_restart(char *);
- printk("Domain 0 killed: rebooting machine!\n");
- machine_restart(0);
+ sched_rem_domain(d);
+ put_domain(d);
}
-
- /* Only allow the domain to be destroyed once. */
- if ( !sched_rem_domain(p) )
- return;
-
- DPRINTK("Killing domain %u\n", p->domain);
-
- destroy_event_channels(p);
-
- /*
- * Note this means that find_domain_by_id may fail, even when the caller
- * holds a reference to the domain being queried. Take care!
- */
- write_lock_irqsave(&tasklist_lock, flags);
- pp = &task_list; /* Delete from task_list. */
- while ( *pp != p )
- pp = &(*pp)->next_list;
- *pp = p->next_list;
- pp = &task_hash[TASK_HASH(p->domain)]; /* Delete from task_hash. */
- while ( *pp != p )
- pp = &(*pp)->next_hash;
- *pp = p->next_hash;
- write_unlock_irqrestore(&tasklist_lock, flags);
-
- if ( p == current )
- {
- __enter_scheduler();
- BUG(); /* never get here */
- }
- else
- {
- put_task_struct(p);
- }
-}
-
-
-void kill_domain(void)
-{
- __kill_domain(current);
-}
-
-
-long kill_other_domain(domid_t dom, int force)
-{
- struct task_struct *p;
-
- if ( (p = find_domain_by_id(dom)) == NULL )
- return -ESRCH;
-
- if ( (p->state == TASK_STOPPED) || (p->state == TASK_CRASHED) )
- __kill_domain(p);
- else if ( force )
- send_hyp_event(p, _HYP_EVENT_DIE);
- else
- send_guest_virq(p, VIRQ_DIE);
-
- put_task_struct(p);
- return 0;
}
-void crash_domain(void)
+void domain_crash(void)
{
- struct task_struct *p;
+ struct domain *d;
- set_current_state(TASK_CRASHED);
+ set_bit(DF_CRASHED, &current->flags);
- p = find_domain_by_id(0);
- send_guest_virq(p, VIRQ_DOM_EXC);
- put_task_struct(p);
+ d = find_domain_by_id(0);
+ send_guest_virq(d, VIRQ_DOM_EXC);
+ put_domain(d);
__enter_scheduler();
BUG();
}
-
-void stop_domain(u8 reason)
+void domain_suspend(u8 reason)
{
- struct task_struct *p;
+ struct domain *d;
if ( current->domain == 0 )
{
get_execution_context(),
sizeof(execution_context_t));
unlazy_fpu(current);
- wmb(); /* All CPUs must see saved info in state TASK_STOPPED. */
- set_current_state(TASK_STOPPED);
+ wmb(); /* All CPUs must see saved info when suspended. */
+ set_bit(DF_SUSPENDED, &current->flags);
- p = find_domain_by_id(0);
- send_guest_virq(p, VIRQ_DOM_EXC);
- put_task_struct(p);
+ d = find_domain_by_id(0);
+ send_guest_virq(d, VIRQ_DOM_EXC);
+ put_domain(d);
__enter_scheduler();
}
-long stop_other_domain(domid_t dom)
-{
- struct task_struct *p;
-
- if ( dom == 0 )
- return -EINVAL;
-
- p = find_domain_by_id(dom);
- if ( p == NULL) return -ESRCH;
-
- if ( p->state != TASK_STOPPED )
- send_guest_virq(p, VIRQ_STOP);
-
- put_task_struct(p);
- return 0;
-}
-
-struct pfn_info *alloc_domain_page(struct task_struct *p)
+struct pfn_info *alloc_domain_page(struct domain *p)
{
struct pfn_info *page = NULL;
unsigned long flags, mask, pfn_stamp, cpu_stamp;
void free_domain_page(struct pfn_info *page)
{
unsigned long flags;
- struct task_struct *p = page->u.domain;
+ struct domain *p = page->u.domain;
ASSERT(!in_irq());
}
-void free_all_dom_mem(struct task_struct *p)
+void free_all_dom_mem(struct domain *p)
{
struct list_head *ent, zombies;
struct pfn_info *page;
}
-unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
+unsigned int alloc_new_dom_mem(struct domain *p, unsigned int kbytes)
{
unsigned int alloc_pfns, nr_pages;
struct pfn_info *page;
/* Release resources belonging to task @p. */
-void release_task(struct task_struct *p)
+void domain_destruct(struct domain *p)
{
- ASSERT(p->state == TASK_DYING);
- ASSERT(!p->has_cpu);
+ struct domain **pp;
+ unsigned long flags;
+
+ if ( !test_bit(DF_DYING, &p->flags) )
+ BUG();
+
+ /* May be already destructed, or get_domain() can race us. */
+ if ( cmpxchg(&p->refcnt.counter, 0, DOMAIN_DESTRUCTED) != 0 )
+ return;
DPRINTK("Releasing task %u\n", p->domain);
+ /* Delete from task list and task hashtable. */
+ write_lock_irqsave(&tasklist_lock, flags);
+ pp = &task_list;
+ while ( *pp != p )
+ pp = &(*pp)->next_list;
+ *pp = p->next_list;
+ pp = &task_hash[TASK_HASH(p->domain)];
+ while ( *pp != p )
+ pp = &(*pp)->next_hash;
+ *pp = p->next_hash;
+ write_unlock_irqrestore(&tasklist_lock, flags);
+
+ destroy_event_channels(p);
+
/* Free all memory associated with this domain. */
free_page((unsigned long)p->mm.perdomain_pt);
UNSHARE_PFN(virt_to_page(p->shared_info));
free_all_dom_mem(p);
- free_task_struct(p);
+ free_domain_struct(p);
}
* than domain 0. ie. the domains that are being built by the userspace dom0
* domain builder.
*/
-int final_setup_guestos(struct task_struct *p, dom0_builddomain_t *builddomain)
+int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
{
unsigned long phys_basetab;
int i, rc = 0;
if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
return -ENOMEM;
- if ( test_bit(PF_CONSTRUCTED, &p->flags) )
+ if ( test_bit(DF_CONSTRUCTED, &p->flags) )
{
rc = -EINVAL;
goto out;
goto out;
}
- clear_bit(PF_DONEFPUINIT, &p->flags);
+ clear_bit(DF_DONEFPUINIT, &p->flags);
if ( c->flags & ECF_I387_VALID )
- set_bit(PF_DONEFPUINIT, &p->flags);
+ set_bit(DF_DONEFPUINIT, &p->flags);
memcpy(&p->shared_info->execution_context,
&c->cpu_ctxt,
sizeof(p->shared_info->execution_context));
/* Set up the shared info structure. */
update_dom_time(p->shared_info);
- set_bit(PF_CONSTRUCTED, &p->flags);
+ set_bit(DF_CONSTRUCTED, &p->flags);
out:
if (c) kfree(c);
return 0;
}
-int construct_dom0(struct task_struct *p,
+int construct_dom0(struct domain *p,
unsigned long alloc_start,
unsigned long alloc_end,
char *image_start, unsigned long image_len,
/* Machine address of next candidate page-table page. */
unsigned long mpt_alloc;
- extern void physdev_init_dom0(struct task_struct *);
+ extern void physdev_init_dom0(struct domain *);
/* Sanity! */
if ( p->domain != 0 )
BUG();
- if ( test_bit(PF_CONSTRUCTED, &p->flags) )
+ if ( test_bit(DF_CONSTRUCTED, &p->flags) )
BUG();
printk("*** LOADING DOMAIN 0 ***\n");
/* DOM0 gets access to everything. */
physdev_init_dom0(p);
- set_bit(PF_CONSTRUCTED, &p->flags);
+ set_bit(DF_CONSTRUCTED, &p->flags);
#if 0 /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
shadow_mode_enable(&p->mm, SHM_test);
+++ /dev/null
-/******************************************************************************
- * event.c
- *
- * A nice interface for passing per-domain asynchronous events.
- * These events are handled in the hypervisor, prior to return
- * to the guest OS.
- *
- * Copyright (c) 2002, K A Fraser
- */
-
-#include <xen/config.h>
-#include <xen/event.h>
-
-typedef void (*hyp_event_callback_fn_t)(void);
-
-/* Ordering must match definitions of _HYP_EVENT_* in xen/sched.h */
-static hyp_event_callback_fn_t event_call_fn[] =
-{
- __enter_scheduler,
- kill_domain,
-};
-
-/* Handle outstanding events for the currently-executing domain. */
-void do_hyp_events(void)
-{
- int nr;
- while ( (nr = ffs(current->hyp_events)) != 0 )
- (event_call_fn[nr-1])();
-}
#define INIT_EVENT_CHANNELS 16
#define MAX_EVENT_CHANNELS 1024
-static int get_free_port(struct task_struct *p)
+static int get_free_port(struct domain *p)
{
int max, port;
event_channel_t *chn;
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
- struct task_struct *p1, *p2;
+ struct domain *p1, *p2;
int port1 = 0, port2 = 0;
domid_t dom1 = bind->dom1, dom2 = bind->dom2;
long rc = 0;
((p2 = find_domain_by_id(dom2)) == NULL) )
{
if ( p1 != NULL )
- put_task_struct(p1);
+ put_domain(p1);
return -ESRCH;
}
if ( p1 != p2 )
spin_unlock(&p2->event_channel_lock);
- put_task_struct(p1);
- put_task_struct(p2);
+ put_domain(p1);
+ put_domain(p2);
bind->port1 = port1;
bind->port2 = port2;
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
- struct task_struct *p = current;
+ struct domain *p = current;
int virq = bind->virq;
int port;
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
- struct task_struct *p = current;
+ struct domain *p = current;
int pirq = bind->pirq;
int port, rc;
}
-static long __evtchn_close(struct task_struct *p1, int port1)
+static long __evtchn_close(struct domain *p1, int port1)
{
- struct task_struct *p2 = NULL;
+ struct domain *p2 = NULL;
event_channel_t *chn1, *chn2;
int port2;
long rc = 0;
if ( p2 == NULL )
{
p2 = chn1[port1].u.remote.dom;
- get_task_struct(p2);
+
+ /* If we unlock p1 then we could lose p2. Must get a reference. */
+ if ( unlikely(!get_domain(p2)) )
+ {
+ /*
+ * Failed to obtain a reference. No matter: p2 must be dying
+ * and so will close this event channel for us.
+ */
+ p2 = NULL;
+ goto out;
+ }
if ( p1->domain < p2->domain )
{
rc = -EINVAL;
goto out;
}
-
+
chn2 = p2->event_channel;
port2 = chn1[port1].u.remote.port;
{
if ( p1 != p2 )
spin_unlock(&p2->event_channel_lock);
- put_task_struct(p2);
+ put_domain(p2);
}
spin_unlock(&p1->event_channel_lock);
static long evtchn_close(evtchn_close_t *close)
{
- struct task_struct *p;
+ struct domain *p;
long rc;
domid_t dom = close->dom;
rc = __evtchn_close(p, close->port);
- put_task_struct(p);
+ put_domain(p);
return rc;
}
static long evtchn_send(int lport)
{
- struct task_struct *lp = current, *rp;
+ struct domain *lp = current, *rp;
int rport;
spin_lock(&lp->event_channel_lock);
rp = lp->event_channel[lport].u.remote.dom;
rport = lp->event_channel[lport].u.remote.port;
- get_task_struct(rp);
-
- spin_unlock(&lp->event_channel_lock);
-
evtchn_set_pending(rp, rport);
- put_task_struct(rp);
+ spin_unlock(&lp->event_channel_lock);
return 0;
}
static long evtchn_status(evtchn_status_t *status)
{
- struct task_struct *p;
+ struct domain *p;
domid_t dom = status->dom;
int port = status->port;
event_channel_t *chn;
out:
spin_unlock(&p->event_channel_lock);
- put_task_struct(p);
+ put_domain(p);
return rc;
}
}
-int init_event_channels(struct task_struct *p)
+int init_event_channels(struct domain *p)
{
spin_lock_init(&p->event_channel_lock);
p->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t),
}
-void destroy_event_channels(struct task_struct *p)
+void destroy_event_channels(struct domain *p)
{
int i;
if ( p->event_channel != NULL )
#include <asm/domain_page.h>
#include <hypervisor-ifs/dom0_ops.h>
-kmem_cache_t *task_struct_cachep;
+kmem_cache_t *domain_struct_cachep;
struct e820entry {
unsigned long addr_lo, addr_hi; /* start of memory segment */
void cmain(unsigned long magic, multiboot_info_t *mbi)
{
- struct task_struct *new_dom;
+ struct domain *new_dom;
unsigned long max_page;
unsigned char *cmdline;
module_t *mod = (module_t *)__va(mbi->mods_addr);
kmem_cache_init();
kmem_cache_sizes_init(max_page);
- task_struct_cachep = kmem_cache_create(
- "task_struct_cache", sizeof(struct task_struct),
+ domain_struct_cachep = kmem_cache_create(
+ "domain_cache", sizeof(struct domain),
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if ( task_struct_cachep == NULL )
+ if ( domain_struct_cachep == NULL )
panic("No slab cache for task structs.");
start_of_day();
if ( new_dom == NULL )
panic("Error creating domain 0\n");
- set_bit(PF_PRIVILEGED, &new_dom->flags);
+ set_bit(DF_PRIVILEGED, &new_dom->flags);
shadow_mode_init();
init_trace_bufs();
- wake_up(new_dom);
-
+ domain_controller_unpause(current);
+ domain_controller_unpause(new_dom);
startup_cpu_idle_loop();
}
machine_restart(NULL);
}
-static void kill_dom0(u_char key, void *dev_id, struct pt_regs *regs)
-{
- printk("'%c' pressed -> gracefully rebooting machine\n", key);
- kill_other_domain(0, 0);
-}
-
void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
- struct task_struct *p;
+ struct domain *p;
shared_info_t *s;
s_time_t now = NOW();
for_each_domain ( p )
{
- printk("Xen: DOM %u, CPU %d [has=%c], state = ",
+ printk("Xen: DOM %u, CPU %d [has=%c]\n",
p->domain, p->processor, p->has_cpu ? 'T':'F');
- sched_prn_state(p ->state);
- printk(", hyp_events = %08x\n", p->hyp_events);
s = p->shared_info;
printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
s->vcpu_data[0].evtchn_upcall_pending,
add_key_handler('L', reset_sched_histo, "reset sched latency histogram");
add_key_handler('q', do_task_queues, "dump task queues + guest state");
add_key_handler('r', dump_runq, "dump run queues");
- add_key_handler('B', kill_dom0, "reboot machine gracefully");
- add_key_handler('R', halt_machine, "reboot machine ungracefully");
+ add_key_handler('R', halt_machine, "reboot machine");
#ifdef PERF_COUNTERS
add_key_handler('p', perfc_printall, "print performance counters");
add_key_handler('P', perfc_reset, "reset performance counters");
static int alloc_l2_table(struct pfn_info *page);
static int alloc_l1_table(struct pfn_info *page);
-static int get_page_from_pagenr(unsigned long page_nr, struct task_struct *p);
+static int get_page_from_pagenr(unsigned long page_nr, struct domain *p);
static int get_page_and_type_from_pagenr(unsigned long page_nr,
u32 type,
- struct task_struct *p);
+ struct domain *p);
static void free_l2_table(struct pfn_info *page);
static void free_l1_table(struct pfn_info *page);
unsigned long deferred_ops;
unsigned long cr0;
/* General-Purpose Subject, Page-Table Subject */
- struct task_struct *gps, *pts;
+ struct domain *gps, *pts;
} percpu_info[NR_CPUS] __cacheline_aligned;
/* Determine the current General-Purpose Subject or Page-Table Subject. */
spin_unlock_irqrestore(&free_list_lock, flags);
}
-static void __invalidate_shadow_ldt(struct task_struct *p)
+static void __invalidate_shadow_ldt(struct domain *p)
{
int i;
unsigned long pfn;
static inline void invalidate_shadow_ldt(void)
{
- struct task_struct *p = current;
+ struct domain *p = current;
if ( p->mm.shadow_ldt_mapcnt != 0 )
__invalidate_shadow_ldt(p);
}
/* Map shadow page at offset @off. */
int map_ldt_shadow_page(unsigned int off)
{
- struct task_struct *p = current;
+ struct domain *p = current;
unsigned long l1e;
- if ( unlikely(in_interrupt()) )
+ if ( unlikely(in_irq()) )
BUG();
__get_user(l1e, (unsigned long *)&linear_pg_table[(p->mm.ldt_base >>
}
-static int get_page_from_pagenr(unsigned long page_nr, struct task_struct *p)
+static int get_page_from_pagenr(unsigned long page_nr, struct domain *p)
{
struct pfn_info *page = &frame_table[page_nr];
static int get_page_and_type_from_pagenr(unsigned long page_nr,
u32 type,
- struct task_struct *p)
+ struct domain *p)
{
struct pfn_info *page = &frame_table[page_nr];
{
unsigned long l1v = l1_pgentry_val(l1e);
unsigned long pfn = l1_pgentry_to_pagenr(l1e);
- extern int domain_iomem_in_pfn(struct task_struct *p, unsigned long pfn);
+ extern int domain_iomem_in_pfn(struct domain *p, unsigned long pfn);
if ( !(l1v & _PAGE_PRESENT) )
return 1;
if ( unlikely(test_and_clear_bit(_PGC_tlb_flush_on_type_change,
&page->count_and_flags)) )
{
- struct task_struct *p = page->u.domain;
+ struct domain *p = page->u.domain;
mb(); /* Check zombie status before using domain ptr. */
/*
* NB. 'p' may no longer be valid by time we dereference it, so
unsigned long pfn = ptr >> PAGE_SHIFT;
unsigned long old_base_pfn;
struct pfn_info *page = &frame_table[pfn];
- struct task_struct *p = current, *q;
+ struct domain *p = current, *q;
domid_t domid;
switch ( cmd )
else
{
if ( percpu_info[cpu].gps != NULL )
- put_task_struct(percpu_info[cpu].gps);
+ put_domain(percpu_info[cpu].gps);
percpu_info[cpu].gps = find_domain_by_id(domid);
percpu_info[cpu].pts = (val & SET_PAGETABLE_SUBJECTDOM) ?
percpu_info[cpu].gps : NULL;
case MMUEXT_RESET_SUBJECTDOM:
if ( percpu_info[cpu].gps != NULL )
- put_task_struct(percpu_info[cpu].gps);
+ put_domain(percpu_info[cpu].gps);
percpu_info[cpu].gps = percpu_info[cpu].pts = NULL;
break;
if ( unlikely(percpu_info[cpu].gps != NULL) )
{
- put_task_struct(percpu_info[cpu].gps);
+ put_domain(percpu_info[cpu].gps);
percpu_info[cpu].gps = percpu_info[cpu].pts = NULL;
}
unsigned long val,
unsigned long flags)
{
- struct task_struct *p = current;
+ struct domain *p = current;
int err = 0;
unsigned int cpu = p->processor;
unsigned long deferred_ops;
domid_t domid)
{
unsigned int cpu = smp_processor_id();
- struct task_struct *p;
+ struct domain *p;
int rc;
if ( unlikely(!IS_PRIV(current)) )
rc = do_update_va_mapping(page_nr, val, flags);
- put_task_struct(p);
+ put_domain(p);
percpu_info[cpu].gps = NULL;
return rc;
int flags; /* flags for access etc */
struct pci_dev *dev; /* the device */
struct list_head node; /* link to the list */
- struct task_struct *owner; /* 'owner of this device' */
+ struct domain *owner; /* 'owner of this device' */
int state; /* state for various checks */
} phys_dev_t;
/* Find a device on a per-domain device list. */
-static phys_dev_t *find_pdev(struct task_struct *p, struct pci_dev *dev)
+static phys_dev_t *find_pdev(struct domain *p, struct pci_dev *dev)
{
phys_dev_t *t, *res = NULL;
struct list_head *tmp;
}
/* Add a device to a per-domain device-access list. */
-static void add_dev_to_task(struct task_struct *p,
+static void add_dev_to_task(struct domain *p,
struct pci_dev *dev, int acc)
{
phys_dev_t *pdev;
int physdev_pci_access_modify(
domid_t dom, int bus, int dev, int func, int enable)
{
- struct task_struct *p;
+ struct domain *p;
struct pci_dev *pdev;
int i, j, rc = 0;
return -ESRCH;
/* Make the domain privileged. */
- set_bit(PF_PHYSDEV, &p->flags);
+ set_bit(DF_PHYSDEV, &p->flags);
/* FIXME: MAW for now make the domain REALLY privileged so that it
* can run a backend driver (hw access should work OK otherwise) */
- set_bit(PF_PRIVILEGED, &p->flags);
+ set_bit(DF_PRIVILEGED, &p->flags);
/* Grant write access to the specified device. */
if ( (pdev = pci_find_slot(bus, PCI_DEVFN(dev, func))) == NULL )
/* rights to IO memory regions are checked when the domain maps them */
}
out:
- put_task_struct(p);
+ put_domain(p);
return rc;
}
/* Check if a domain controls a device with IO memory within frame @pfn.
* Returns: 1 if the domain should be allowed to map @pfn, 0 otherwise. */
-int domain_iomem_in_pfn(struct task_struct *p, unsigned long pfn)
+int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
{
int ret = 0;
struct list_head *l;
}
/* check if a domain has general access to a device */
-inline static int check_dev_acc (struct task_struct *p,
+inline static int check_dev_acc (struct domain *p,
int bus, int dev, int func,
phys_dev_t **pdev)
{
/* Domain 0 has read access to all devices. */
-void physdev_init_dom0(struct task_struct *p)
+void physdev_init_dom0(struct domain *p)
{
struct pci_dev *dev;
phys_dev_t *pdev;
}
}
- set_bit(PF_PHYSDEV, &p->flags);
+ set_bit(DF_PHYSDEV, &p->flags);
}
#include <hypervisor-ifs/sched_ctl.h>
#include <xen/trace.h>
+/*
+ * KAF -- Atropos is broken by the new scheduler interfaces.
+ * It'll need fixing to get rid of use of ATROPOS_TASK__*
+ */
+#ifdef KAF_KILLED
+
#define ATROPOS_TASK_UNBLOCKED 16
#define ATROPOS_TASK_WAIT 32
struct at_dom_info
{
/* MAW Xen additions */
- struct task_struct *owner; /* the task_struct this data belongs to */
+ struct domain *owner; /* the domain this data belongs to */
struct list_head waitq; /* wait queue */
int reason; /* reason domain was last scheduled */
}
-/** waitq_el - get the task_struct that owns a wait queue list element */
-static inline struct task_struct * waitq_el(struct list_head *l)
+/** waitq_el - get the domain that owns a wait queue list element */
+static inline struct domain *waitq_el(struct list_head *l)
{
struct at_dom_info *inf;
inf = list_entry(l, struct at_dom_info, waitq);
* These are scheduled in preference to domains with remain < 0
* in an attempt to improve interactive performance.
*/
-static void requeue(struct task_struct *sdom)
+static void requeue(struct domain *sdom)
{
struct at_dom_info *inf = DOM_INFO(sdom);
struct list_head *prev = WAITQ(sdom->processor);
if ( next == WAITQ(sdom->processor) )
list_add_tail(&inf->waitq, WAITQ(sdom->processor));
}
- else if ( sdom->state == TASK_RUNNING )
+ else if ( domain_runnable(sdom) )
{
/* insert into ordered run queue */
prev = RUNQ(sdom->processor);
list_for_each(next, RUNQ(sdom->processor))
{
- struct task_struct *p = list_entry(next, struct task_struct,
+ struct domain *p = list_entry(next, struct domain,
run_list);
if( DOM_INFO(p)->deadline > inf->deadline || is_idle_task(p) )
}
/* prepare a task to be added to scheduling */
-static void at_add_task(struct task_struct *p)
+static void at_add_task(struct domain *p)
{
s_time_t now = NOW();
* dequeue - remove a domain from any queues it is on.
* @sdom: the task to remove
*/
-static void dequeue(struct task_struct *sdom)
+static void dequeue(struct domain *sdom)
{
struct at_dom_info *inf = DOM_INFO(sdom);
* idea is to give better response times to unblocking whilst preserving QoS
* guarantees to other domains.
*/
-static void unblock(struct task_struct *sdom)
+static void unblock(struct domain *sdom)
{
s_time_t time = NOW();
struct at_dom_info *inf = DOM_INFO(sdom);
inf->slice = inf->nat_slice / ( inf->nat_period / inf->latency );
inf->period = inf->latency;
inf->remain = inf->slice;
-
- sdom->state = TASK_RUNNING;
}
else
{
*/
task_slice_t ksched_scheduler(s_time_t time)
{
- struct task_struct *cur_sdom = current; /* Current sdom */
+ struct domain *cur_sdom = current; /* Current sdom */
s_time_t newtime;
s_time_t ranfor; /* How long the domain ran */
- struct task_struct *sdom; /* tmp. scheduling domain */
+ struct domain *sdom; /* tmp. scheduling domain */
int reason; /* reason for reschedule */
int cpu = cur_sdom->processor; /* current CPU */
struct at_dom_info *cur_info;
dequeue(cur_sdom);
- if ((cur_sdom->state == TASK_RUNNING) ||
- (cur_sdom->state == ATROPOS_TASK_UNBLOCKED))
+ if ( domain_runnable(cur_sdom) ||
+ (cur_sdom->state == ATROPOS_TASK_UNBLOCKED) )
{
/* In this block, we are doing accounting for an sdom which has
inf->prevddln = inf->deadline;
inf->deadline += inf->period;
- if(inf->remain > 0)
- sdom->state = TASK_RUNNING;
- else
+ if ( inf->remain <= 0 )
sdom->state = ATROPOS_TASK_WAIT;
/* Place on the appropriate queue */
/* we guarantee there's always something on the runqueue */
cur_sdom = list_entry(RUNQ(cpu)->next,
- struct task_struct, run_list);
+ struct domain, run_list);
cur_info = DOM_INFO(cur_sdom);
newtime = time + cur_info->remain;
}
/* print relevant per-domain info for a run queue dump */
-static void at_dump_runq_el(struct task_struct *p)
+static void at_dump_runq_el(struct domain *p)
{
printk("lastschd = %llu, xtratime = %d ",
p->lastschd, DOM_INFO(p)->xtratime);
/* set or fetch domain scheduling parameters */
-static int at_adjdom(struct task_struct *p, struct sched_adjdom_cmd *cmd)
+static int at_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd)
{
if ( cmd->direction == SCHED_INFO_PUT )
{
/** at_alloc_task - allocate private info for a task */
-static int at_alloc_task(struct task_struct *p)
+static int at_alloc_task(struct domain *p)
{
ASSERT(p != NULL);
/* free memory associated with a task */
-static void at_free_task(struct task_struct *p)
+static void at_free_task(struct domain *p)
{
kmem_cache_free( dom_info_cache, DOM_INFO(p) );
}
return ret;
}
+#endif /* KAF_KILLED */
struct scheduler sched_atropos_def = {
.name = "Atropos Soft Real Time Scheduler",
.opt_name = "atropos",
.sched_id = SCHED_ATROPOS,
-
+#ifdef KAF_KILLED
.init_scheduler = at_init_scheduler,
.alloc_task = at_alloc_task,
.add_task = at_add_task,
.dump_cpu_state = at_dump_cpu_state,
.dump_runq_el = at_dump_runq_el,
.prn_state = at_prn_state,
+#endif /* KAF_KILLED */
};
*
* Returns non-zero on failure.
*/
-int bvt_alloc_task(struct task_struct *p)
+int bvt_alloc_task(struct domain *p)
{
if ( (BVT_INFO(p) = kmem_cache_alloc(dom_info_cache,GFP_KERNEL)) == NULL )
return -1;
/*
* Add and remove a domain
*/
-void bvt_add_task(struct task_struct *p)
+void bvt_add_task(struct domain *p)
{
struct bvt_dom_info *inf = BVT_INFO(p);
* bvt_free_task - free BVT private structures for a task
* @p: task
*/
-void bvt_free_task(struct task_struct *p)
+void bvt_free_task(struct domain *p)
{
ASSERT( p->sched_priv != NULL );
kmem_cache_free( dom_info_cache, p->sched_priv );
}
-void bvt_wake_up(struct task_struct *p)
+void bvt_wake_up(struct domain *p)
{
struct bvt_dom_info *inf = BVT_INFO(p);
/*
* Block the currently-executing domain until a pertinent event occurs.
*/
-static void bvt_do_block(struct task_struct *p)
+static void bvt_do_block(struct domain *p)
{
BVT_INFO(p)->warpback = 0;
}
}
/* Adjust scheduling parameter for a given domain. */
-int bvt_adjdom(struct task_struct *p,
+int bvt_adjdom(struct domain *p,
struct sched_adjdom_cmd *cmd)
{
struct bvt_adjdom *params = &cmd->u.bvt;
*/
static task_slice_t bvt_do_schedule(s_time_t now)
{
- struct task_struct *prev = current, *next = NULL, *next_prime, *p;
+ struct domain *prev = current, *next = NULL, *next_prime, *p;
struct list_head *tmp;
int cpu = prev->processor;
s32 r_time; /* time for new dom to run */
__del_from_runqueue(prev);
- if ( likely(prev->state == TASK_RUNNING) )
+ if ( domain_runnable(prev) )
__add_to_runqueue_tail(prev);
}
list_for_each ( tmp, &schedule_data[cpu].runqueue )
{
- p = list_entry(tmp, struct task_struct, run_list);
+ p = list_entry(tmp, struct domain, run_list);
p_inf = BVT_INFO(p);
if ( p_inf->evt < next_evt )
}
-static void bvt_dump_runq_el(struct task_struct *p)
+static void bvt_dump_runq_el(struct domain *p)
{
struct bvt_dom_info *inf = BVT_INFO(p);
return 0;
}
-static void bvt_pause(struct task_struct *p)
+static void bvt_pause(struct domain *p)
{
if( __task_on_runqueue(p) )
__del_from_runqueue(p);
static task_slice_t rr_do_schedule(s_time_t now)
{
- struct task_struct *prev = current;
+ struct domain *prev = current;
int cpu = current->processor;
task_slice_t ret;
__del_from_runqueue(prev);
- if ( prev->state == TASK_RUNNING )
+ if ( domain_runnable(prev) )
__add_to_runqueue_tail(prev);
ret.task = list_entry(schedule_data[cpu].runqueue.next,
- struct task_struct, run_list);
+ struct domain, run_list);
ret.time = rr_slice;
printk("rr_slice = %llu ", rr_slice);
}
-static void rr_pause(struct task_struct *p)
+static void rr_pause(struct domain *p)
{
if ( __task_on_runqueue(p) )
__del_from_runqueue(p);
*/
static struct ac_timer fallback_timer[NR_CPUS];
-extern kmem_cache_t *task_struct_cachep;
+extern kmem_cache_t *domain_struct_cachep;
-void free_task_struct(struct task_struct *p)
+void free_domain_struct(struct domain *p)
{
SCHED_OP(free_task, p);
- kmem_cache_free(task_struct_cachep, p);
+ kmem_cache_free(domain_struct_cachep, p);
}
-/**
- * alloc_task_struct - allocate a new task_struct and sched private structures
- */
-struct task_struct *alloc_task_struct(void)
+struct domain *alloc_domain_struct(void)
{
- struct task_struct *p;
+ struct domain *p;
- if ( (p = kmem_cache_alloc(task_struct_cachep,GFP_KERNEL)) == NULL )
+ if ( (p = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
return NULL;
memset(p, 0, sizeof(*p));
if ( SCHED_OP(alloc_task, p) < 0 )
{
- kmem_cache_free(task_struct_cachep,p);
+ kmem_cache_free(domain_struct_cachep,p);
return NULL;
}
/*
* Add and remove a domain
*/
-void sched_add_domain(struct task_struct *p)
+void sched_add_domain(struct domain *p)
{
- p->state = TASK_STOPPED;
+ domain_controller_pause(p);
if ( p->domain != IDLE_DOMAIN_ID )
{
TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(p->domain), _LOW32(p->domain), p);
}
-int sched_rem_domain(struct task_struct *p)
+void sched_rem_domain(struct domain *p)
{
- int x, y = p->state;
- do {
- if ( (x = y) == TASK_DYING ) return 0;
- } while ( (y = cmpxchg(&p->state, x, TASK_DYING)) != x );
-
rem_ac_timer(&p->timer);
-
SCHED_OP(rem_task, p);
-
TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(p->domain), _LOW32(p->domain), p);
-
- return 1;
}
void init_idle_task(void)
{
unsigned long flags;
- struct task_struct *p = current;
+ struct domain *p = current;
if ( SCHED_OP(alloc_task, p) < 0)
panic("Failed to allocate scheduler private data for idle task");
spin_lock_irqsave(&schedule_lock[p->processor], flags);
p->has_cpu = 1;
- p->state = TASK_RUNNING;
if ( !__task_on_runqueue(p) )
__add_to_runqueue_head(p);
spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
}
-void __wake_up(struct task_struct *p)
+/* Returns TRUE if the domain was actually woken up. */
+int domain_wakeup(struct domain *d)
{
- TRACE_3D(TRC_SCHED_WAKE, _HIGH32(p->domain), _LOW32(p->domain), p);
-
- ASSERT(p->state != TASK_DYING);
+ unsigned long flags;
+ int cpu = d->processor, woken_up = 0;
+ struct domain *curr;
+ s_time_t now, min_time;
- if ( unlikely(__task_on_runqueue(p)) )
- return;
-
- p->state = TASK_RUNNING;
+ spin_lock_irqsave(&schedule_lock[cpu], flags);
- SCHED_OP(wake_up, p);
+ if ( likely(domain_runnable(d)) && likely(!__task_on_runqueue(d)) )
+ {
+ woken_up = 1;
+ TRACE_3D(TRC_SCHED_WAKE, _HIGH32(d->domain), _LOW32(d->domain), d);
+ SCHED_OP(wake_up, d);
#ifdef WAKEUP_HISTO
- p->wokenup = NOW();
+ d->wokenup = NOW();
#endif
+
+ ASSERT(__task_on_runqueue(d));
+ ASSERT(!d->has_cpu);
+
+ now = NOW();
+ curr = schedule_data[cpu].curr;
+
+ /* Currently-running domain should run at least for ctx_allow. */
+ min_time = curr->lastschd + curr->min_slice;
+
+ if ( is_idle_task(curr) || (min_time <= now) )
+ cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+ else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
+ mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
+ }
+
+ spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+
+ return woken_up;
}
-void wake_up(struct task_struct *p)
+void __domain_pause(struct domain *d)
{
unsigned long flags;
- spin_lock_irqsave(&schedule_lock[p->processor], flags);
- __wake_up(p);
- spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+ int cpu = d->processor;
+
+ spin_lock_irqsave(&schedule_lock[cpu], flags);
+
+ if ( d->has_cpu )
+ cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+ else if ( __task_on_runqueue(d) )
+ __del_from_runqueue(d);
+
+ spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+
+ /* Synchronous. */
+ while ( d->has_cpu )
+ {
+ smp_mb();
+ cpu_relax();
+ }
}
-/*
- * Block the currently-executing domain until a pertinent event occurs.
- */
+
+/* Block the currently-executing domain until a pertinent event occurs. */
long do_block(void)
{
ASSERT(current->domain != IDLE_DOMAIN_ID);
current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
- current->state = TASK_INTERRUPTIBLE;
+ set_bit(DF_BLOCKED, &current->flags);
TRACE_2D(TRC_SCHED_BLOCK, current->domain, current);
__enter_scheduler();
return 0;
break;
}
- case SCHEDOP_stop:
+ case SCHEDOP_suspend:
{
- stop_domain((u8)(op >> SCHEDOP_reasonshift));
+ domain_suspend((u8)(op >> SCHEDOP_reasonshift));
break;
}
return ret;
}
-
-/*
- * sched_pause_sync - synchronously pause a domain's execution.
- * XXXX This is horribly broken -- here just as a place holder at present,
- * do not use.
- */
-void sched_pause_sync(struct task_struct *p)
-{
- unsigned long flags;
- int cpu = p->processor;
-
- spin_lock_irqsave(&schedule_lock[cpu], flags);
-
- /* If not the current task, we can remove it from scheduling now. */
- if ( schedule_data[cpu].curr != p )
- SCHED_OP(pause, p);
-
- p->state = TASK_PAUSED;
-
- spin_unlock_irqrestore(&schedule_lock[cpu], flags);
-
- /* Spin until domain is descheduled by its local scheduler. */
- while ( schedule_data[cpu].curr == p )
- {
- send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
- do_yield();
- }
-
- /* The domain will not be scheduled again until we do a wake_up(). */
-}
-
/* Per-domain one-shot-timer hypercall. */
long do_set_timer_op(unsigned long timeout_hi, unsigned long timeout_lo)
{
- struct task_struct *p = current;
+ struct domain *p = current;
rem_ac_timer(&p->timer);
/* Adjust scheduling parameter for a given domain. */
long sched_adjdom(struct sched_adjdom_cmd *cmd)
{
- struct task_struct *p;
+ struct domain *p;
if ( cmd->sched_id != ops.sched_id )
return -EINVAL;
SCHED_OP(adjdom, p, cmd);
- put_task_struct(p);
+ put_domain(p);
return 0;
}
-/*
- * cause a run through the scheduler when appropriate
- * Appropriate is:
- * - current task is idle task
- * - the current task already ran for it's context switch allowance
- * Otherwise we do a run through the scheduler after the current tasks
- * context switch allowance is over.
- */
-unsigned long __reschedule(struct task_struct *p)
-{
- int cpu = p->processor;
- struct task_struct *curr;
- s_time_t now, min_time;
-
- TRACE_3D(TRC_SCHED_RESCHED, _HIGH32(p->domain), _LOW32(p->domain), p);
-
- if ( unlikely(p->has_cpu || !__task_on_runqueue(p)) )
- return 0;
-
- now = NOW();
- curr = schedule_data[cpu].curr;
- /* domain should run at least for ctx_allow */
- min_time = curr->lastschd + curr->min_slice;
-
- if ( is_idle_task(curr) || (min_time <= now) )
- {
- set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
- return (1 << p->processor);
- }
-
- /* current hasn't been running for long enough -> reprogram timer.
- * but don't bother if timer would go off soon anyway */
- if ( schedule_data[cpu].s_timer.expires > min_time + TIME_SLOP )
- mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
-
- return SCHED_OP(reschedule, p);
-}
-
-void reschedule(struct task_struct *p)
-{
- unsigned long flags, cpu_mask;
-
- spin_lock_irqsave(&schedule_lock[p->processor], flags);
- cpu_mask = __reschedule(p);
-
- spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
-
-#ifdef CONFIG_SMP
- cpu_mask &= ~(1 << smp_processor_id());
- if ( cpu_mask != 0 )
- smp_send_event_check_mask(cpu_mask);
-#endif
-}
-
/*
* The main function
* - deschedule the current domain (scheduler independent).
* - pick a new domain (scheduler dependent).
*/
-asmlinkage void __enter_scheduler(void)
+void __enter_scheduler(void)
{
- struct task_struct *prev = current, *next = NULL;
+ struct domain *prev = current, *next = NULL;
int cpu = prev->processor;
s_time_t now;
task_slice_t next_slice;
perfc_incrc(sched_run);
- clear_bit(_HYP_EVENT_NEED_RESCHED, &prev->hyp_events);
-
spin_lock_irq(&schedule_lock[cpu]);
now = NOW();
rem_ac_timer(&schedule_data[cpu].s_timer);
- ASSERT(!in_interrupt());
+ ASSERT(!in_irq());
ASSERT(__task_on_runqueue(prev));
- ASSERT(prev->state != TASK_UNINTERRUPTIBLE);
- if ( prev->state == TASK_INTERRUPTIBLE )
+ if ( test_bit(DF_BLOCKED, &prev->flags) )
{
- /* this check is needed to avoid a race condition */
- if ( signal_pending(prev) )
- prev->state = TASK_RUNNING;
+ /* This check is needed to avoid a race condition. */
+ if ( event_pending(prev) )
+ clear_bit(DF_BLOCKED, &prev->flags);
else
SCHED_OP(do_block, prev);
}
switch_to(prev, next);
- if ( unlikely(prev->state == TASK_DYING) )
- put_task_struct(prev);
-
/* Mark a timer event for the newly-scheduled domain. */
if ( !is_idle_task(next) )
send_guest_virq(next, VIRQ_TIMER);
/* No locking needed -- pointer comparison is safe :-) */
int idle_cpu(int cpu)
{
- struct task_struct *p = schedule_data[cpu].curr;
+ struct domain *p = schedule_data[cpu].curr;
return p == idle_task[cpu];
}
static void s_timer_fn(unsigned long unused)
{
TRACE_0D(TRC_SCHED_S_TIMER_FN);
- set_bit(_HYP_EVENT_NEED_RESCHED, &current->hyp_events);
+ raise_softirq(SCHEDULE_SOFTIRQ);
perfc_incrc(sched_irq);
}
/* Periodic tick timer: send timer event to current domain*/
static void t_timer_fn(unsigned long unused)
{
- struct task_struct *p = current;
+ struct domain *p = current;
TRACE_0D(TRC_SCHED_T_TIMER_FN);
/* Domain timer function, sends a virtual timer interrupt to domain */
static void dom_timer_fn(unsigned long data)
{
- struct task_struct *p = (struct task_struct *)data;
+ struct domain *p = (struct domain *)data;
TRACE_0D(TRC_SCHED_DOM_TIMER_FN);
send_guest_virq(p, VIRQ_TIMER);
}
/* Fallback timer to ensure guests get time updated 'often enough'. */
static void fallback_timer_fn(unsigned long unused)
{
- struct task_struct *p = current;
+ struct domain *p = current;
TRACE_0D(TRC_SCHED_FALLBACK_TIMER_FN);
{
int i;
+ open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
+
for ( i = 0; i < NR_CPUS; i++ )
{
INIT_LIST_HEAD(&schedule_data[i].runqueue);
{
struct list_head *list;
int loop = 0;
- struct task_struct *p;
+ struct domain *p;
printk ("QUEUE %s %lx n: %lx, p: %lx\n", name, (unsigned long)queue,
(unsigned long) queue->next, (unsigned long) queue->prev);
list_for_each (list, queue) {
- p = list_entry(list, struct task_struct, run_list);
+ p = list_entry(list, struct domain, run_list);
printk("%3d: %u has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
SCHED_OP(dump_runq_el, p);
printk("c=0x%X%08X\n", (u32)(p->cpu_time>>32), (u32)p->cpu_time);
return;
}
-/* print human-readable "state", given the numeric code for that state */
-void sched_prn_state(int state)
-{
- int ret = 0;
-
- switch(state)
- {
- case TASK_RUNNING:
- printk("Running");
- break;
- case TASK_INTERRUPTIBLE:
- printk("Int sleep");
- break;
- case TASK_UNINTERRUPTIBLE:
- printk("UInt sleep");
- break;
- case TASK_STOPPED:
- printk("Stopped");
- break;
- case TASK_DYING:
- printk("Dying");
- break;
- default:
- ret = SCHED_OP(prn_state, state);
- }
-
- if ( ret != 0 )
- printk("Unknown");
-}
-
#if defined(WAKEUP_HISTO) || defined(BLOCKTIME_HISTO)
void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
{
{
}
-int shadow_mode_enable( struct task_struct *p, unsigned int mode )
+int shadow_mode_enable( struct domain *p, unsigned int mode )
{
struct mm_struct *m = &p->mm;
struct shadow_status **fptr;
return -ENOMEM;
}
-void shadow_mode_disable( struct task_struct *p )
+void shadow_mode_disable( struct domain *p )
{
struct mm_struct *m = &p->mm;
struct shadow_status *next;
kfree( &m->shadow_ht[0] );
}
-static int shadow_mode_table_op( struct task_struct *p,
+static int shadow_mode_table_op( struct domain *p,
dom0_shadow_control_t *sc )
{
unsigned int op = sc->op;
0, bytes);
}
- if (zero)
- {
- /* might as well stop the domain as an optimization. */
- if ( p->state != TASK_STOPPED )
- send_guest_virq(p, VIRQ_STOP);
- }
+ /* Might as well stop the domain as an optimization. */
+ if ( zero )
+ domain_controller_pause(p);
break;
}
return rc;
}
-int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc )
+int shadow_mode_control( struct domain *p, dom0_shadow_control_t *sc )
{
unsigned int cmd = sc->op;
int rc = 0;
if (flags & SLAB_NO_GROW)
return 0;
-#if 0
- if (in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC)
- BUG();
-#endif
-
ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & SLAB_LEVEL_MASK);
if (local_flags == SLAB_ATOMIC)
/******************************************************************************
* common/softirq.c
*
- * Modified from the Linux original. Softirqs in Xen are only executed in
- * an outermost activation (e.g., never within an interrupt activation).
- * This simplifies some things and generally seems a good thing.
+ * Softirqs in Xen are only executed in an outermost activation (e.g., never
+ * within an interrupt activation). This simplifies some things and generally
+ * seems a good thing.
*
* Copyright (c) 2003, K A Fraser
- *
- * Copyright (C) 1992 Linus Torvalds
+ * Copyright (c) 1992, Linus Torvalds
*/
#include <xen/config.h>
irq_cpustat_t irq_stat[NR_CPUS];
-static struct softirq_action softirq_vec[32] __cacheline_aligned;
+static softirq_handler softirq_handlers[NR_SOFTIRQS] __cacheline_aligned;
asmlinkage void do_softirq()
{
unsigned int pending, cpu = smp_processor_id();
- struct softirq_action *h;
-
- if ( unlikely(in_interrupt()) )
- BUG();
-
- /*
- * XEN: This isn't real mutual-exclusion: it just ensures that in_softirq()
- * and in_interrupt() are both TRUE, allowing checks for erroneous reentry.
- */
- cpu_bh_disable(cpu);
+ softirq_handler *h;
while ( (pending = xchg(&softirq_pending(cpu), 0)) != 0 )
{
- h = softirq_vec;
+ h = softirq_handlers;
while ( pending )
{
if ( pending & 1 )
- h->action(h);
+ (*h)();
h++;
pending >>= 1;
}
}
-
- cpu_bh_enable(cpu);
}
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
__cpu_raise_softirq(smp_processor_id(), nr);
}
-void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
-{
- softirq_vec[nr].data = data;
- softirq_vec[nr].action = action;
-}
-
-
-/* Tasklets */
-
-struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
-struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;
-
-void __tasklet_schedule(struct tasklet_struct *t)
-{
- int cpu = smp_processor_id();
- unsigned long flags;
-
- local_irq_save(flags);
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_restore(flags);
-}
-
-void __tasklet_hi_schedule(struct tasklet_struct *t)
-{
- int cpu = smp_processor_id();
- unsigned long flags;
-
- local_irq_save(flags);
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_restore(flags);
-}
-
-static void tasklet_action(struct softirq_action *a)
-{
- int cpu = smp_processor_id();
- struct tasklet_struct *list;
-
- local_irq_disable();
- list = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = NULL;
- local_irq_enable();
-
- while ( list != NULL )
- {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- if ( likely(tasklet_trylock(t)) )
- {
- if ( likely(!atomic_read(&t->count)) )
- {
- if ( unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED,
- &t->state)) )
- BUG();
- t->func(t->data);
- }
- tasklet_unlock(t);
- continue;
- }
-
- local_irq_disable();
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_enable();
- }
-}
-
-static void tasklet_hi_action(struct softirq_action *a)
-{
- int cpu = smp_processor_id();
- struct tasklet_struct *list;
-
- local_irq_disable();
- list = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = NULL;
- local_irq_enable();
-
- while ( list != NULL )
- {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- if ( likely(tasklet_trylock(t)) )
- {
- if ( likely(!atomic_read(&t->count)) )
- {
- if ( unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED,
- &t->state)) )
- BUG();
- t->func(t->data);
- }
- tasklet_unlock(t);
- continue;
- }
-
- local_irq_disable();
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_enable();
- }
-}
-
-
-void tasklet_init(struct tasklet_struct *t,
- void (*func)(unsigned long), unsigned long data)
-{
- t->next = NULL;
- t->state = 0;
- atomic_set(&t->count, 0);
- t->func = func;
- t->data = data;
-}
-
-void tasklet_kill(struct tasklet_struct *t)
-{
- if ( in_interrupt() )
- BUG();
- while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
- while ( test_bit(TASKLET_STATE_SCHED, &t->state) )
- do_softirq();
- tasklet_unlock_wait(t);
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-}
-
-void __init softirq_init()
+void open_softirq(int nr, softirq_handler handler)
{
- open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
- open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+ softirq_handlers[nr] = handler;
}
unsigned long nr_pages;
char *rawbuf;
struct t_buf *buf;
- struct task_struct *dom0;
+ struct domain *dom0;
if ( opt_tbuf_size == 0 )
{
for( i = 0; i < nr_pages; i++)
SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf+(i*PAGE_SIZE)), dom0);
- put_task_struct(dom0);
+ put_domain(dom0);
for ( i = 0; i < smp_num_cpus; i++ )
{
static void __serial_rx(unsigned char c, struct pt_regs *regs)
{
key_handler *handler;
- struct task_struct *p;
+ struct domain *p;
if ( xen_rx )
{
{
p = find_domain_by_id(0); /* only DOM0 reads the serial buffer */
send_guest_virq(p, VIRQ_CONSOLE);
- put_task_struct(p);
+ put_domain(p);
}
}
}
return 0;
#else
- if ( !test_and_set_bit(PF_CONSOLEWRITEBUG, &current->flags) )
+ if ( !test_and_set_bit(DF_CONSOLEWRITEBUG, &current->flags) )
{
printk("DOM%u is attempting to use the deprecated "
"HYPERVISOR_console_write() interface.\n", current->domain);
+++ /dev/null
-/******************************************************************************
- * keyboard.c
- *
- * Driver for IBM PC AT- and PS/2-compatible keyboards.
- *
- * This file contains portions of code from Linux.
- */
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <xen/sched.h>
-#include <xen/keyhandler.h>
-#include <hypervisor-ifs/kbd.h>
-#include <xen/event.h>
-#include <xen/console.h>
-#include <xen/interrupt.h>
-
-/* Hash-defines torn from <xen/pc_keyb.h> and <asm/keyboard.h> */
-
-#define KBD_STATUS_REG 0x64 /* Status register (R) */
-#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
-#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
-
-/* register status bits */
-#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
-#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
-#define KBD_STAT_SELFTEST 0x04 /* Self test successful */
-#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */
-
-#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
-#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
-#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
-#define KBD_STAT_PERR 0x80 /* Parity error */
-
-#define kbd_read_input() inb(KBD_DATA_REG)
-#define kbd_read_status() inb(KBD_STATUS_REG)
-
-#define KEYBOARD_IRQ 1
-#define AUX_IRQ 12
-
-#define kbd_write_output(val) outb(val, KBD_DATA_REG)
-#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
-
-#ifdef CONFIG_XEN_ATTENTION_KEY
-
-static int xen_attention_key_down = 0;
-#define XEN_ATTENTION_KEY 0x46 /* Scroll Lock */
-#define KBD_SCANCODE_KEYUP_MASK 0x80
-
-/* Simple scancode-to-key mappings for internal Xen use. */
-
-static unsigned char keymap_normal[] =
-{
- 0 , 0 ,'1','2', '3','4','5','6', '7','8','9','0', '-','=','\b','\t',
- 'q','w','e','r', 't','y','u','i', 'o','p','[',']','\r', 0 ,'a','s',
- 'd','f','g','h', 'j','k','l',';', '\'','`', 0 ,'#', 'z','x','c','v',
- 'b','n','m',',', '.','/', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
-
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 0 , 0 , 0 , 0 , 0 , 0 ,'\\', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0
-};
-
-static unsigned char keymap_shift[] =
-{
- 0 , 0 ,'!','"', '#','$','%','^', '&','*','(',')', '_','+','\b','\t',
- 'Q','W','E','R', 'T','Y','U','I', 'O','P','{','}','\r', 0 ,'A','S',
- 'D','F','G','H', 'J','K','L',':', '@', 0 , 0 ,'~', 'Z','X','C','V',
- 'B','N','M','<', '>','?', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
-
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 0 , 0 , 0 , 0 , 0 , 0 ,'|', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0
-};
-
-
-static int keyboard_shift = 0;
-
-static unsigned char convert_scancode (unsigned char scancode)
-{
- unsigned char value = 0;
-
- switch ( scancode )
- {
-
- case 0xaa: /* shift (left) UP */
- case 0xb6: /* shift (right) UP */
- keyboard_shift = 0;
- break;
-
- case 0x2a: /* shift (left) DOWN */
- case 0x36: /* shift (right) DOWN */
- keyboard_shift = 1;
- break;
-
- default:
- /* Only process key-up events */
- if(!(scancode & KBD_SCANCODE_KEYUP_MASK))
- break;
- scancode = scancode & ~KBD_SCANCODE_KEYUP_MASK;
- if (keyboard_shift)
- value = keymap_shift[scancode];
- else
- value = keymap_normal[scancode];
- break;
- }
-
- return value;
-}
-
-#endif /* CONFIG_XEN_ATTENTION_KEY */
-
-
-/* We store kbd events awaiting receive by a guest OS in a ring buffer. */
-#define KBD_RING_SIZE 64
-static int kbd_ring[KBD_RING_SIZE];
-static int kbd_ring_prod = 0;
-static int kbd_ring_cons = 0;
-
-#define KBD_RING_INC(_i) (((_i)+1) & (KBD_RING_SIZE-1))
-#define KBD_RING_FULL (KBD_RING_INC(kbd_ring_prod) == kbd_ring_cons)
-#define KBD_RING_EMPTY (kbd_ring_prod == kbd_ring_cons)
-
-static void kbd_ring_push(unsigned char status, unsigned char scancode)
-{
- if ( KBD_RING_FULL )
- return;
-
- kbd_ring[kbd_ring_prod] = KBD_CODE(scancode, status);
- kbd_ring_prod = KBD_RING_INC(kbd_ring_prod);
-}
-
-static int kbd_ring_pop(void)
-{
- int ret;
-
- if ( KBD_RING_EMPTY )
- {
- /* Read directly from controller - no events waiting in ring. */
- unsigned char status = kbd_read_status();
- unsigned char scancode = kbd_read_input();
- ret = KBD_CODE(scancode, status);
- }
- else
- {
- ret = kbd_ring[kbd_ring_cons];
- kbd_ring_cons = KBD_RING_INC(kbd_ring_cons);
- }
-
- return ret;
-}
-
-
-/*
- * NB. Lock is essential as there are two distinct interrupts (keyboard + aux).
- * Also interrupts may disturb guest OS actions.
- */
-static spinlock_t kbd_lock;
-
-long do_kbd_op(unsigned char op, unsigned char val)
-{
- unsigned long flags;
- long ret = -EINVAL;
-
- if ( !CONSOLE_ISOWNER(current) )
- return -EPERM;
-
- spin_lock_irqsave(&kbd_lock, flags);
-
- switch ( op )
- {
- case KBD_OP_WRITEOUTPUT:
- kbd_write_output(val);
- ret = 0L;
- break;
- case KBD_OP_WRITECOMMAND:
- kbd_write_command(val);
- ret = 0L;
- break;
- case KBD_OP_READ:
- ret = kbd_ring_pop();
- break;
- }
-
- spin_unlock_irqrestore(&kbd_lock, flags);
-
- return ret;
-}
-
-
-static void keyboard_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- unsigned char status=0, scancode;
- unsigned int work = 1000;
- unsigned long flags;
- struct task_struct *p = CONSOLE_OWNER;
-
- spin_lock_irqsave(&kbd_lock, flags);
-
- while ( (--work > 0) && ((status = kbd_read_status()) & KBD_STAT_OBF) )
- {
- scancode = kbd_read_input();
-
-#ifdef CONFIG_XEN_ATTENTION_KEY
- if ( !(status & (KBD_STAT_GTO | KBD_STAT_PERR | KBD_STAT_MOUSE_OBF)) )
- {
- if ( (scancode & ~KBD_SCANCODE_KEYUP_MASK) == XEN_ATTENTION_KEY )
- {
- xen_attention_key_down = !(scancode & KBD_SCANCODE_KEYUP_MASK);
- continue;
- }
- else if ( xen_attention_key_down )
- {
- key_handler *handler;
- unsigned char key;
- spin_unlock_irqrestore(&kbd_lock, flags);
- key = convert_scancode(scancode);
- if ( key && (handler = get_key_handler(key)) )
- (*handler)(key, dev_id, regs);
- spin_lock_irqsave(&kbd_lock, flags);
- continue;
- }
- }
-#endif
-
- if ( p != NULL )
- {
- kbd_ring_push(status, scancode);
- send_guest_virq(p, VIRQ_PS2);
- }
- }
-
- if ( !work )
- printk(KERN_ERR "xen_keyb: controller jammed (0x%02X).\n", status);
-
- spin_unlock_irqrestore(&kbd_lock, flags);
-
- if ( p != NULL )
- put_task_struct(p);
-}
-
-
-static struct irqaction keyb = { keyboard_interrupt, "Keyboard", NULL };
-static struct irqaction aux = { keyboard_interrupt, "PS/2 Mouse", NULL };
-
-void initialize_keyboard()
-{
- spin_lock_init(&kbd_lock);
- (void)setup_irq(KEYBOARD_IRQ, &keyb);
- (void)setup_irq(AUX_IRQ, &aux);
-}
-
typedef struct {
unsigned int __softirq_pending;
unsigned int __local_irq_count;
- unsigned int __local_bh_count;
unsigned int __nmi_count;
unsigned long idle_timestamp;
} ____cacheline_aligned irq_cpustat_t;
#include <xen/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
- */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
#include <asm/processor.h>
extern void init_fpu(void);
-extern void save_init_fpu( struct task_struct *tsk );
-extern void restore_fpu( struct task_struct *tsk );
+extern void save_init_fpu( struct domain *tsk );
+extern void restore_fpu( struct domain *tsk );
#define unlazy_fpu( tsk ) do { \
- if ( test_bit(PF_USEDFPU, &tsk->flags) ) \
+ if ( test_bit(DF_USEDFPU, &tsk->flags) ) \
save_init_fpu( tsk ); \
} while (0)
#define clear_fpu( tsk ) do { \
- if ( test_and_clear_bit(PF_USEDFPU, &tsk->flags) ) { \
+ if ( test_and_clear_bit(DF_USEDFPU, &tsk->flags) ) { \
asm volatile("fwait"); \
stts(); \
} \
#ifndef __ASSEMBLY__
-static inline void load_LDT(struct task_struct *p)
+static inline void load_LDT(struct domain *p)
{
unsigned int cpu;
struct desc_struct *desc;
unsigned long kernelstack; /* TOS for current process */
unsigned long oldrsp; /* user rsp for system call */
unsigned long irqrsp; /* Old rsp for interrupts. */
- struct task_struct *pcurrent; /* Current process */
+ struct domain *pcurrent; /* Current process */
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */
#include <xen/spinlock.h>
#include <hypervisor-ifs/hypervisor-if.h>
-struct task_struct;
+struct domain;
/*
* Default implementation of macro that returns current
/*
* Size of io_bitmap in longwords:
* For Xen we support the full 8kbyte IO bitmap but use the io_bitmap_sel field
- * of the task_struct to avoid a full 8kbyte copy when switching to / from
- * domains with bits cleared.
+ * to avoid a full 8kbyte copy when switching to domains with bits cleared.
*/
#define IO_BITMAP_SIZE 2048
#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
&((_p)->fast_trap_desc), 8))
#endif
-long set_fast_trap(struct task_struct *p, int idx);
+long set_fast_trap(struct domain *p, int idx);
#define INIT_THREAD { \
0, 0, \
#define GET_GDT_ENTRIES(_p) ((*(u16 *)((_p)->mm.gdt + 0)))
#define GET_GDT_ADDRESS(_p) ((*(unsigned long *)((_p)->mm.gdt + 2)))
-long set_gdt(struct task_struct *p,
+long set_gdt(struct domain *p,
unsigned long *frames,
unsigned int entries);
-long set_debugreg(struct task_struct *p, int reg, unsigned long value);
+long set_debugreg(struct domain *p, int reg, unsigned long value);
struct microcode {
unsigned int hdrver;
+++ /dev/null
-#ifndef __ASM_SOFTIRQ_H
-#define __ASM_SOFTIRQ_H
-
-#include <asm/atomic.h>
-#include <asm/hardirq.h>
-
-#define cpu_bh_enable(cpu) \
- do { barrier(); local_bh_count(cpu)--; } while (0)
-#define cpu_bh_disable(cpu) \
- do { local_bh_count(cpu)++; barrier(); } while (0)
-
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
-#endif /* __ASM_SOFTIRQ_H */
#ifndef _X86_CURRENT_H
#define _X86_CURRENT_H
-struct task_struct;
+struct domain;
#define STACK_RESERVED \
- (sizeof(execution_context_t) + sizeof(struct task_struct *))
+ (sizeof(execution_context_t) + sizeof(struct domain *))
-static inline struct task_struct * get_current(void)
+static inline struct domain * get_current(void)
{
- struct task_struct *current;
+ struct domain *current;
__asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0"
: "=r" (current) : "0" (STACK_SIZE-4) );
return current;
#define current get_current()
-static inline void set_current(struct task_struct *p)
+static inline void set_current(struct domain *p)
{
__asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)"
: : "r" (STACK_SIZE-4), "r" (p) );
#define _X86_64_CURRENT_H
#if !defined(__ASSEMBLY__)
-struct task_struct;
+struct domain;
#include <asm/pda.h>
#define STACK_RESERVED \
(sizeof(execution_context_t))
-static inline struct task_struct * get_current(void)
+static inline struct domain * get_current(void)
{
- struct task_struct *current;
+ struct domain *current;
current = read_pda(pcurrent);
return current;
}
#define current get_current()
-static inline void set_current(struct task_struct *p)
+static inline void set_current(struct domain *p)
{
write_pda(pcurrent,p);
}
#ifndef __ASSEMBLY__
-static inline void load_LDT(struct task_struct *p)
+static inline void load_LDT(struct domain *p)
{
unsigned long ents;
typedef struct {
/* IN parameters. */
domid_t domain; /* 0 */
- /* hack to indicate that you want to wait for other domain -- replace
- with proper sychronous stop soon! */
- u32 sync; /* 4 */
-} PACKED dom0_stopdomain_t; /* 8 bytes */
+} PACKED dom0_stopdomain_t; /* 4 bytes */
#define DOM0_GETDOMAININFO 12
typedef struct {
domid_t domain; /* 0 */ /* NB. IN/OUT variable. */
/* OUT variables. */
#define DOMSTATE_CRASHED 0 /* Crashed domain; frozen for postmortem. */
-#define DOMSTATE_STOPPED 1 /* Domain voluntarily halted it execution. */
+#define DOMSTATE_SUSPENDED 1 /* Domain voluntarily halted its execution. */
#define DOMSTATE_PAUSED 2 /* Currently paused (forced non-schedulable). */
#define DOMSTATE_BLOCKED 3 /* Currently blocked pending a wake-up event. */
#define DOMSTATE_RUNNABLE 4 /* Currently runnable. */
memory_t shared_info_frame; /* 48: MFN of shared_info struct */
MEMORY_PADDING;
u64 cpu_time; /* 56 */
- u32 hyp_events; /* 64 */
-} PACKED dom0_getdomaininfo_t; /* 68 bytes */
+} PACKED dom0_getdomaininfo_t; /* 64 bytes */
#define DOM0_BUILDDOMAIN 13
typedef struct {
#define __HYPERVISOR_set_fast_trap 15
#define __HYPERVISOR_dom_mem_op 16
#define __HYPERVISOR_multicall 17
-#define __HYPERVISOR_kbd_op 18
#define __HYPERVISOR_update_va_mapping 19
#define __HYPERVISOR_set_timer_op 20
#define __HYPERVISOR_event_channel_op 21
*
* Virtual interrupts that a guest OS may receive from the hypervisor.
*/
-#define VIRQ_BLKDEV 0 /* (OBS) A block device response has been queued. */
-#define VIRQ_TIMER 1 /* A timeout has been updated. */
-#define VIRQ_DIE 2 /* (OBS) OS is about to be killed. Clean up! */
-#define VIRQ_DEBUG 3 /* Request guest to dump debug info (gross!) */
-#define VIRQ_NET 4 /* (OBS) There are packets for transmission. */
-#define VIRQ_PS2 5 /* (OBS) PS/2 keyboard or mouse event(s) */
-#define VIRQ_STOP 6 /* (OBS) Prepare for stopping and pickling */
-#define VIRQ_EVTCHN 7 /* Event pending on an event channel */
-#define VIRQ_VBD_UPD 8 /* (OBS) Event to signal VBDs should be reprobed */
-#define VIRQ_CONSOLE 9 /* (DOM0) bytes received on master console. */
-#define VIRQ_PHYSIRQ 10 /* Pending physical IRQs. */
-#define VIRQ_MISDIRECT 11 /* Catch-all virtual interrupt. */
-#define VIRQ_DOM_EXC 12 /* (DOM0) Exceptional event for some domain. */
-#define NR_VIRQS 13
+#define VIRQ_MISDIRECT 0 /* Catch-all interrupt for unbound VIRQs. */
+#define VIRQ_TIMER 1 /* Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 2 /* Request guest to dump debug info. */
+#define VIRQ_CONSOLE 3 /* (DOM0) bytes received on emergency console. */
+#define VIRQ_DOM_EXC 4 /* (DOM0) Exceptional event for some domain. */
+#define NR_VIRQS 5
/*
* MMU-UPDATE REQUESTS
/*
* Commands to HYPERVISOR_sched_op().
*/
-#define SCHEDOP_yield 0 /* Give up the CPU voluntarily. */
-#define SCHEDOP_block 1 /* Block until an event is received. */
-#define SCHEDOP_stop 4 /* Stop executing this domain. */
+#define SCHEDOP_yield 0 /* Give up the CPU voluntarily. */
+#define SCHEDOP_block 1 /* Block until an event is received. */
+#define SCHEDOP_suspend 2 /* Stop executing this domain. */
#define SCHEDOP_cmdmask 255 /* 8-bit command. */
-#define SCHEDOP_reasonshift 8 /* 8-bit stop code. (SCHEDOP_stop only) */
+#define SCHEDOP_reasonshift 8 /* 8-bit suspend code. (SCHEDOP_suspend) */
/*
* Commands to HYPERVISOR_console_io().
+++ /dev/null
-/******************************************************************************
- * kbd.h
- *
- * PS/2 interface definitions
- * Copyright (c) 2003 James Scott, Intel Research Cambridge
- */
-
-#ifndef __HYPERVISOR_KBD_H__
-#define __HYPERVISOR_KBD_H__
-
-
-#define KBD_OP_WRITEOUTPUT 0
-#define KBD_OP_WRITECOMMAND 1
-#define KBD_OP_READ 2
-
-#define KBD_CODE_SCANCODE(_r) ((unsigned char)((_r) & 0xff))
-#define KBD_CODE_STATUS(_r) ((unsigned char)(((_r) >> 8) & 0xff))
-#define KBD_CODE(_c, _s) ((int)(((_c) & 0xff) | (((_s) & 0xff) << 8)))
-
-#endif
#ifndef __ASSEMBLY__
#include <xen/compiler.h>
-extern unsigned int opt_ser_baud;
-#define SERIAL_ENABLED (opt_ser_baud != 0)
#endif
#endif /* __XEN_CONFIG_H__ */
* xen/console.h
*
* Xen header file concerning console access.
- *
- * Copyright (c) 2003 James Scott, Intel Research Cambridge
*/
#ifndef __CONSOLE_H__
extern spinlock_t console_lock;
-/*
- * Ownership of console --- currently hardwired to dom0. This is used to see
- * who gets the PS/2 keyboard/mouse events
- */
-#define CONSOLE_ISOWNER(p) (p->domain == 0)
-#define CONSOLE_OWNER (find_domain_by_id(0))
-
void set_printk_prefix(const char *prefix);
#define CONSOLE_RING_CLEAR 1
*/
/* Schedule an asynchronous callback for the specified domain. */
-static inline void guest_schedule_to_run(struct task_struct *p)
+static inline void guest_async_callback(struct domain *p)
{
-#ifdef CONFIG_SMP
- unsigned long flags, cpu_mask;
-
- spin_lock_irqsave(&schedule_lock[p->processor], flags);
- if ( p->state == TASK_INTERRUPTIBLE )
- __wake_up(p);
- cpu_mask = __reschedule(p);
- if ( p->has_cpu )
- cpu_mask |= 1 << p->processor;
- spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
-
- cpu_mask &= ~(1 << smp_processor_id());
- if ( cpu_mask != 0 )
- smp_send_event_check_mask(cpu_mask);
-#else
- if ( p->state == TASK_INTERRUPTIBLE )
- wake_up(p);
- reschedule(p);
-#endif
+ if ( !domain_unblock(p) && p->has_cpu && (p != current) )
+ smp_send_event_check_mask(1 << p->processor);
}
/*
* may require explicit memory barriers.
*/
-static inline void evtchn_set_pending(struct task_struct *p, int port)
+static inline void evtchn_set_pending(struct domain *p, int port)
{
shared_info_t *s = p->shared_info;
if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
{
/* The VCPU pending flag must be set /after/ update to evtchn-pend. */
s->vcpu_data[0].evtchn_upcall_pending = 1;
- guest_schedule_to_run(p);
+ guest_async_callback(p);
}
}
-static inline void evtchn_set_exception(struct task_struct *p, int port)
+static inline void evtchn_set_exception(struct domain *p, int port)
{
if ( !test_and_set_bit(port, &p->shared_info->evtchn_exception[0]) )
evtchn_set_pending(p, port);
* @p: Domain to which virtual IRQ should be sent
* @virq: Virtual IRQ number (VIRQ_*)
*/
-static inline void send_guest_virq(struct task_struct *p, int virq)
+static inline void send_guest_virq(struct domain *p, int virq)
{
evtchn_set_pending(p, p->virq_to_evtchn[virq]);
}
* @p: Domain to which physical IRQ should be sent
* @pirq: Physical IRQ number
*/
-static inline void send_guest_pirq(struct task_struct *p, int pirq)
+static inline void send_guest_pirq(struct domain *p, int pirq)
{
evtchn_set_pending(p, p->pirq_to_evtchn[pirq]);
}
-
-/*
- * HYPERVISOR-HANDLED EVENTS
- */
-
-static inline void send_hyp_event(struct task_struct *p, int event)
-{
- if ( !test_and_set_bit(event, &p->hyp_events) )
- guest_schedule_to_run(p);
-}
-
-/* Called on return from (architecture-dependent) entry.S. */
-void do_hyp_events(void);
+#define event_pending(_d) \
+ ((_d)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
+ !(_d)->shared_info->vcpu_data[0].evtchn_upcall_mask)
#endif /* __XEN_EVENT_H__ */
#include <asm/atomic.h>
#include <asm/ptrace.h>
-struct irqaction {
+struct irqaction
+{
void (*handler)(int, void *, struct pt_regs *);
const char *name;
void *dev_id;
};
#include <asm/hardirq.h>
-#include <asm/softirq.h>
enum
{
- HI_SOFTIRQ=0,
- AC_TIMER_SOFTIRQ,
- TASKLET_SOFTIRQ,
- NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ
+ AC_TIMER_SOFTIRQ=0,
+ NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
+ SCHEDULE_SOFTIRQ, /* NB. This must come last or do_softirq() will break! */
+ NR_SOFTIRQS
};
-/* softirq mask and active fields moved to irq_cpustat_t in
- * asm/hardirq.h to get better cache usage. KAO
- */
-
-struct softirq_action
-{
- void (*action)(struct softirq_action *);
- void *data;
-};
+typedef void (*softirq_handler)(void);
asmlinkage void do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
-extern void softirq_init(void);
+extern void open_softirq(int nr, softirq_handler handler);
#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
-/* Tasklets --- multithreaded analogue of BHs.
-
- Main feature differing them of generic softirqs: tasklet
- is running only on one CPU simultaneously.
-
- Main feature differing them of BHs: different tasklets
- may be run simultaneously on different CPUs.
-
- Properties:
- * If tasklet_schedule() is called, then tasklet is guaranteed
- to be executed on some cpu at least once after this.
- * If the tasklet is already scheduled, but its excecution is still not
- started, it will be executed only once.
- * If this tasklet is already running on another CPU (or schedule is called
- from tasklet itself), it is rescheduled for later.
- * Tasklet is strictly serialized wrt itself, but not
- wrt another tasklets. If client needs some intertask synchronization,
- he makes it with spinlocks.
- */
-
-struct tasklet_struct
-{
- struct tasklet_struct *next;
- unsigned long state;
- atomic_t count;
- void (*func)(unsigned long);
- unsigned long data;
-};
-
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
-
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
-
-
-enum
-{
- TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
-};
-
-struct tasklet_head
-{
- struct tasklet_struct *list;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-extern struct tasklet_head tasklet_vec[NR_CPUS];
-extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
-
-#ifdef CONFIG_SMP
-static inline int tasklet_trylock(struct tasklet_struct *t)
-{
- return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
- smp_mb__before_clear_bit();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
-#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
-#endif
-
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
-
-static inline void tasklet_schedule(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_schedule(t);
-}
-
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
-
-static inline void tasklet_hi_schedule(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_hi_schedule(t);
-}
-
-
-static inline void tasklet_disable_nosync(struct tasklet_struct *t)
-{
- atomic_inc(&t->count);
- smp_mb__after_atomic_inc();
-}
-
-static inline void tasklet_disable(struct tasklet_struct *t)
-{
- tasklet_disable_nosync(t);
- tasklet_unlock_wait(t);
- smp_mb();
-}
-
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- if (atomic_dec_and_test(&t->count) &&
- test_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_schedule(t);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- if (atomic_dec_and_test(&t->count) &&
- test_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_hi_schedule(t);
-}
-
-extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_init(struct tasklet_struct *t,
- void (*func)(unsigned long), unsigned long data);
-
#endif
extern hw_irq_controller no_irq_type;
extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
-struct task_struct;
-extern int pirq_guest_unmask(struct task_struct *p);
-extern int pirq_guest_bind(struct task_struct *p, int irq, int will_share);
-extern int pirq_guest_unbind(struct task_struct *p, int irq);
+struct domain;
+extern int pirq_guest_unmask(struct domain *p);
+extern int pirq_guest_bind(struct domain *p, int irq, int will_share);
+extern int pirq_guest_unbind(struct domain *p, int irq);
extern int pirq_guest_bindable(int irq, int will_share);
#endif /* __XEN_IRQ_H__ */
/* arch independent irq_stat fields */
#define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending)
#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count)
-#define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count)
#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count)
#endif /* __irq_cpustat_h */
/* The following possible uses are context-dependent. */
union {
/* Page is in use and not a zombie: we keep a pointer to its owner. */
- struct task_struct *domain;
+ struct domain *domain;
/* Page is not currently allocated: mask of possibly-tainted TLBs. */
unsigned long cpu_mask;
/* Page is a zombie: this word currently has no use. */
void init_frametable(unsigned long nr_pages);
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domain_page(struct task_struct *p);
+struct pfn_info *alloc_domain_page(struct domain *p);
void free_domain_page(struct pfn_info *page);
int alloc_page_type(struct pfn_info *page, unsigned int type);
static inline int get_page(struct pfn_info *page,
- struct task_struct *domain)
+ struct domain *domain)
{
u32 x, nx, y = page->count_and_flags;
- struct task_struct *p, *np = page->u.domain;
+ struct domain *p, *np = page->u.domain;
do {
x = y;
static inline int get_page_and_type(struct pfn_info *page,
- struct task_struct *domain,
+ struct domain *domain,
u32 type)
{
int rc = get_page(page, domain);
typedef struct schedule_data_st
{
struct list_head runqueue; /* runqueue */
- struct task_struct *curr; /* current task */
- struct task_struct *idle; /* idle task for this cpu */
+ struct domain *curr; /* current task */
+ struct domain *idle; /* idle task for this cpu */
void * sched_priv;
struct ac_timer s_timer; /* scheduling timer */
#ifdef BUCKETS
typedef struct task_slice_st
{
- struct task_struct *task;
+ struct domain *task;
s_time_t time;
} task_slice_t;
unsigned int sched_id; /* ID for this scheduler */
int (*init_scheduler) ();
- int (*alloc_task) (struct task_struct *);
- void (*add_task) (struct task_struct *);
- void (*free_task) (struct task_struct *);
- void (*rem_task) (struct task_struct *);
- void (*wake_up) (struct task_struct *);
- void (*do_block) (struct task_struct *);
+ int (*alloc_task) (struct domain *);
+ void (*add_task) (struct domain *);
+ void (*free_task) (struct domain *);
+ void (*rem_task) (struct domain *);
+ void (*wake_up) (struct domain *);
+ void (*do_block) (struct domain *);
task_slice_t (*do_schedule) (s_time_t);
int (*control) (struct sched_ctl_cmd *);
- int (*adjdom) (struct task_struct *,
+ int (*adjdom) (struct domain *,
struct sched_adjdom_cmd *);
- s32 (*reschedule) (struct task_struct *);
void (*dump_settings) (void);
void (*dump_cpu_state) (int);
- void (*dump_runq_el) (struct task_struct *);
+ void (*dump_runq_el) (struct domain *);
int (*prn_state) (int);
- void (*pause) (struct task_struct *);
+ void (*pause) (struct domain *);
};
/* per CPU scheduler information */
* Wrappers for run-queue management. Must be called with the schedule_lock
* held.
*/
-static inline void __add_to_runqueue_head(struct task_struct * p)
+static inline void __add_to_runqueue_head(struct domain * p)
{
list_add(&p->run_list, &schedule_data[p->processor].runqueue);
}
-static inline void __add_to_runqueue_tail(struct task_struct * p)
+static inline void __add_to_runqueue_tail(struct domain * p)
{
list_add_tail(&p->run_list, &schedule_data[p->processor].runqueue);
}
-static inline void __del_from_runqueue(struct task_struct * p)
+static inline void __del_from_runqueue(struct domain * p)
{
list_del(&p->run_list);
p->run_list.next = NULL;
}
-static inline int __task_on_runqueue(struct task_struct *p)
+static inline int __task_on_runqueue(struct domain *p)
{
return p->run_list.next != NULL;
}
#define next_domain(p) \\
- list_entry((p)->run_list.next, struct task_struct, run_list)
+ list_entry((p)->run_list.next, struct domain, run_list)
static inline int __runqueue_empty(int cpu)
#include <xen/spinlock.h>
-#define _HYP_EVENT_NEED_RESCHED 0
-#define _HYP_EVENT_DIE 1
-
-#define PF_DONEFPUINIT 0 /* Has the FPU been initialised for this task? */
-#define PF_USEDFPU 1 /* Has this task used the FPU since last save? */
-#define PF_GUEST_STTS 2 /* Has the guest OS requested 'stts'? */
-#define PF_CONSTRUCTED 3 /* Has the guest OS been fully built yet? */
-#define PF_IDLETASK 4 /* Is this one of the per-CPU idle domains? */
-#define PF_PRIVILEGED 5 /* Is this domain privileged? */
-#define PF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console? */
-#define PF_PHYSDEV 7 /* May this domain do IO to physical devices? */
-
-#define IS_PRIV(_p) (test_bit(PF_PRIVILEGED, &(_p)->flags))
-#define IS_CAPABLE_PHYSDEV(_p) (test_bit(PF_PHYSDEV, &(_p)->flags))
-
-struct task_struct;
+struct domain;
typedef struct event_channel_st
{
union {
struct {
u16 port;
- struct task_struct *dom;
+ struct domain *dom;
} __attribute__ ((packed)) remote; /* state == ECS_CONNECTED */
u16 pirq; /* state == ECS_PIRQ */
u16 virq; /* state == ECS_VIRQ */
} u;
} event_channel_t;
-int init_event_channels(struct task_struct *p);
-void destroy_event_channels(struct task_struct *p);
+int init_event_channels(struct domain *p);
+void destroy_event_channels(struct domain *p);
-struct task_struct
+struct domain
{
/*
* DO NOT CHANGE THE ORDER OF THE FOLLOWING.
* Their offsets are hardcoded in entry.S
*/
- unsigned short processor; /* 00: current processor */
- unsigned short hyp_events; /* 02: pending intra-Xen events */
+ u32 processor; /* 00: current processor */
/* An unsafe pointer into a shared data area. */
shared_info_t *shared_info; /* 04: shared data area */
/* Scheduling. */
struct list_head run_list;
int has_cpu;
- int state; /* current run state */
- int stop_code; /* stop code from OS (if TASK_STOPPED). */
+ int stop_code; /* stop code from OS (if DF_STOPPED). */
int cpupinned; /* true if pinned to curent CPU */
s_time_t lastschd; /* time this domain was last scheduled */
s_time_t lastdeschd; /* time this domain was last descheduled */
s_time_t create_time;
struct thread_struct thread;
- struct task_struct *next_list, *next_hash;
+ struct domain *next_list, *next_hash;
/* Event channel information. */
event_channel_t *event_channel;
unsigned long flags;
atomic_t refcnt;
+ atomic_t pausecnt;
};
-/*
- * domain states
- * TASK_RUNNING: Domain is runable and should be on a run queue
- * TASK_INTERRUPTIBLE: Domain is blocked by may be woken up by an event
- * or expiring timer
- * TASK_UNINTERRUPTIBLE: Domain is blocked but may not be woken up by an
- * arbitrary event or timer.
- * TASK_STOPPED: Domain is stopped.
- * TASK_DYING: Domain is about to cross over to the land of the dead.
- * TASK_PAUSED: Task currently removed from scheduling.
- */
-
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define TASK_STOPPED 4
-#define TASK_DYING 8
-#define TASK_PAUSED 16
-#define TASK_CRASHED 32
-
#include <asm/uaccess.h> /* for KERNEL_DS */
#define IDLE0_TASK(_t) \
{ \
processor: 0, \
domain: IDLE_DOMAIN_ID, \
- state: TASK_RUNNING, \
has_cpu: 0, \
mm: IDLE0_MM, \
addr_limit: KERNEL_DS, \
thread: INIT_THREAD, \
- flags: 1<<PF_IDLETASK, \
+ flags: 1<<DF_IDLETASK, \
refcnt: ATOMIC_INIT(1) \
}
-extern struct task_struct idle0_task;
+extern struct domain idle0_task;
-extern struct task_struct *idle_task[NR_CPUS];
+extern struct domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFFFFFU)
-#define is_idle_task(_p) (test_bit(PF_IDLETASK, &(_p)->flags))
+#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
#include <xen/slab.h>
-void free_task_struct(struct task_struct *p);
-struct task_struct *alloc_task_struct();
-
-#define put_task_struct(_p) \
- if ( atomic_dec_and_test(&(_p)->refcnt) ) release_task(_p)
-#define get_task_struct(_p) \
- atomic_inc(&(_p)->refcnt)
+void free_domain_struct(struct domain *p);
+struct domain *alloc_domain_struct();
-extern struct task_struct *do_createdomain(
+#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
+#define put_domain(_d) \
+ if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)
+static inline int get_domain(struct domain *d)
+{
+ atomic_inc(&d->refcnt);
+ return !(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED);
+}
+
+extern struct domain *do_createdomain(
domid_t dom_id, unsigned int cpu);
-extern int construct_dom0(struct task_struct *p,
+extern int construct_dom0(struct domain *p,
unsigned long alloc_start,
unsigned long alloc_end,
char *image_start, unsigned long image_len,
char *initrd_start, unsigned long initrd_len,
char *cmdline);
-extern int final_setup_guestos(struct task_struct *p, dom0_builddomain_t *);
+extern int final_setup_guestos(struct domain *p, dom0_builddomain_t *);
-struct task_struct *find_domain_by_id(domid_t dom);
-struct task_struct *find_last_domain(void);
-extern void release_task(struct task_struct *);
-extern void __kill_domain(struct task_struct *p);
-extern void kill_domain(void);
-extern long kill_other_domain(domid_t dom, int force);
-extern void stop_domain(u8 reason);
-extern long stop_other_domain(domid_t dom);
+struct domain *find_domain_by_id(domid_t dom);
+struct domain *find_last_domain(void);
+extern void domain_destruct(struct domain *d);
+extern void domain_kill(struct domain *d);
+extern void domain_crash(void);
+extern void domain_suspend(u8 reason);
/* arch/process.c */
-void new_thread(struct task_struct *p,
+void new_thread(struct domain *p,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info);
#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
-void sched_add_domain(struct task_struct *p);
-int sched_rem_domain(struct task_struct *p);
+void sched_add_domain(struct domain *p);
+void sched_rem_domain(struct domain *p);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int sched_id();
-void sched_pause_sync(struct task_struct *);
void init_idle_task(void);
-void __wake_up(struct task_struct *p);
-void wake_up(struct task_struct *p);
-void reschedule(struct task_struct *p);
-unsigned long __reschedule(struct task_struct *p);
+int domain_wakeup(struct domain *p);
+void __domain_pause(struct domain *p);
-/* NB. Limited entry in Xen. Not for arbitrary use! */
-asmlinkage void __enter_scheduler(void);
-#define schedule() __schedule_not_callable_in_xen()
+void __enter_scheduler(void);
-extern void switch_to(struct task_struct *prev,
- struct task_struct *next);
+extern void switch_to(struct domain *prev,
+ struct domain *next);
-/* A compatibility hack for Linux drivers. */
-#define MAX_SCHEDULE_TIMEOUT 0UL
-static inline long schedule_timeout(long timeout)
-{
- set_current_state(TASK_RUNNING);
- mdelay(timeout*(1000/HZ));
- return 0;
-}
-
-#define signal_pending(_p) \
- ( (_p)->hyp_events || \
- ((_p)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
- !(_p)->shared_info->vcpu_data[0].evtchn_upcall_mask) )
-
void domain_init(void);
int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
void continue_cpu_idle_loop(void);
void continue_nonidle_task(void);
-void sched_prn_state(int state);
/* This task_hash and task_list are protected by the tasklist_lock. */
#define TASK_HASH_SIZE 256
#define TASK_HASH(_id) ((int)(_id)&(TASK_HASH_SIZE-1))
-extern struct task_struct *task_hash[TASK_HASH_SIZE];
-extern struct task_struct *task_list;
+extern struct domain *task_hash[TASK_HASH_SIZE];
+extern struct domain *task_list;
#define for_each_domain(_p) \
for ( (_p) = task_list; (_p) != NULL; (_p) = (_p)->next_list )
-extern void update_process_times(int user);
+#define DF_DONEFPUINIT 0 /* Has the FPU been initialised for this task? */
+#define DF_USEDFPU 1 /* Has this task used the FPU since last save? */
+#define DF_GUEST_STTS 2 /* Has the guest OS requested 'stts'? */
+#define DF_CONSTRUCTED 3 /* Has the guest OS been fully built yet? */
+#define DF_IDLETASK 4 /* Is this one of the per-CPU idle domains? */
+#define DF_PRIVILEGED 5 /* Is this domain privileged? */
+#define DF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console? */
+#define DF_PHYSDEV 7 /* May this domain do IO to physical devices? */
+
+#define DF_BLOCKED 8 /* Domain is blocked waiting for an event. */
+#define DF_CONTROLPAUSE 9 /* Domain is paused by control software. */
+#define DF_SUSPENDED 10 /* Guest suspended its execution for some reason. */
+#define DF_CRASHED 11 /* Domain crashed inside Xen, cannot continue. */
+#define DF_DYING 12 /* Death rattle. */
+
+static inline int domain_runnable(struct domain *p)
+{
+ return ( (atomic_read(&p->pausecnt) == 0) &&
+ !(p->flags & ((1<<DF_BLOCKED)|(1<<DF_CONTROLPAUSE)|
+ (1<<DF_SUSPENDED)|(1<<DF_CRASHED)|(1<<DF_DYING))) );
+}
+
+/* Returns TRUE if the domain was actually unblocked and woken. */
+static inline int domain_unblock(struct domain *d)
+{
+ if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
+ return domain_wakeup(d);
+ return 0;
+}
+
+static inline void domain_unsuspend(struct domain *d)
+{
+ if ( test_and_clear_bit(DF_SUSPENDED, &d->flags) )
+ (void)domain_wakeup(d);
+}
+
+static inline void domain_controller_pause(struct domain *d)
+{
+ if ( !test_and_set_bit(DF_CONTROLPAUSE, &d->flags) )
+ __domain_pause(d);
+}
+
+static inline void domain_controller_unpause(struct domain *d)
+{
+ if ( test_and_clear_bit(DF_CONTROLPAUSE, &d->flags) )
+ (void)domain_wakeup(d);
+}
+
+static inline void domain_pause(struct domain *d)
+{
+ if ( d == current ) BUG();
+ atomic_inc(&d->pausecnt);
+ __domain_pause(d);
+}
+
+static inline void domain_unpause(struct domain *d)
+{
+ if ( atomic_dec_and_test(&d->pausecnt) )
+ (void)domain_wakeup(d);
+}
+
+
+#define IS_PRIV(_p) (test_bit(DF_PRIVILEGED, &(_p)->flags))
+#define IS_CAPABLE_PHYSDEV(_p) (test_bit(DF_PHYSDEV, &(_p)->flags))
#endif /*_LINUX_SCHED_H */
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START+(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
extern void shadow_mode_init(void);
-extern int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc );
+extern int shadow_mode_control( struct domain *p, dom0_shadow_control_t *sc );
extern int shadow_fault( unsigned long va, long error_code );
extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
unsigned long *prev_spfn_ptr,
l1_pgentry_t **prev_spl1e_ptr );
extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
extern void unshadow_table( unsigned long gpfn, unsigned int type );
-extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
-extern void shadow_mode_disable( struct task_struct *p );
+extern int shadow_mode_enable( struct domain *p, unsigned int mode );
+extern void shadow_mode_disable( struct domain *p );
extern unsigned long shadow_l2_table(
struct mm_struct *m, unsigned long gpfn );
*/
#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0)
#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
-#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0)
#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0)
#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0)
-#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0)
#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
-#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
#define spin_unlock_irqrestore(lock, flags) do { spin_unlock(lock); local_irq_restore(flags); } while (0)
#define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
-#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
#define read_unlock_irqrestore(lock, flags) do { read_unlock(lock); local_irq_restore(flags); } while (0)
#define read_unlock_irq(lock) do { read_unlock(lock); local_irq_enable(); } while (0)
-#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
#define write_unlock_irqrestore(lock, flags) do { write_unlock(lock); local_irq_restore(flags); } while (0)
#define write_unlock_irq(lock) do { write_unlock(lock); local_irq_enable(); } while (0)
-#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
-#define spin_trylock_bh(lock) ({ int __r; local_bh_disable();\
- __r = spin_trylock(lock); \
- if (!__r) local_bh_enable(); \
- __r; })
#ifdef CONFIG_SMP
#include <asm/spinlock.h>